-
# frozen_string_literal: true
-
-
1
require "base64"
-
1
require "mime-types"
-
1
require "openai"
-
-
1
require_relative "response"
-
-
1
module AI
  # Main namespace.
  #
  # Chat wraps the OpenAI Responses API. It accumulates a transcript in
  # #messages, supports image/file attachments, structured output via
  # #schema=, a web-search tool, and reasoning-effort control.
  class Chat
    attr_accessor :messages, :model, :web_search, :previous_response_id
    attr_reader :reasoning_effort, :client, :schema

    # Effort levels accepted by the Responses API for reasoning models.
    VALID_REASONING_EFFORTS = [:low, :medium, :high].freeze

    # @param api_key [String, nil] explicit API key; when nil the key is
    #   read from the environment variable named by +api_key_env_var+.
    # @param api_key_env_var [String] env var consulted when api_key is nil
    # @raise [KeyError] if no key is given and the env var is unset
    def initialize(api_key: nil, api_key_env_var: "OPENAI_API_KEY")
      @api_key = api_key || ENV.fetch(api_key_env_var)
      @messages = []
      @reasoning_effort = nil
      @model = "gpt-4.1-nano"
      @client = OpenAI::Client.new(api_key: @api_key)
      @previous_response_id = nil
    end

    # Append a message to the transcript.
    #
    # Text-only messages are stored as {role:, content:} (plus the Response
    # object under :response for bookkeeping). Messages with attachments are
    # stored with a content array of input_text / input_image / input_file
    # parts, as the Responses API expects.
    def add(content, role: "user", response: nil, image: nil, images: nil, file: nil, files: nil)
      if image.nil? && images.nil? && file.nil? && files.nil?
        messages.push(
          {
            role: role,
            content: content,
            response: response
          }.compact
        )
      else
        text_and_files_array = [
          {
            type: "input_text",
            text: content
          }
        ]

        if images && !images.empty?
          images_array = images.map do |img|
            {
              type: "input_image",
              image_url: process_file(img)
            }
          end

          text_and_files_array += images_array
        elsif image
          text_and_files_array.push(
            {
              type: "input_image",
              image_url: process_file(image)
            }
          )
        elsif files && !files.empty?
          files_array = files.map do |f|
            {
              type: "input_file",
              # Was hard-coded to "test"; derive a real name so the API's
              # file-type handling gets something meaningful.
              filename: filename_for(f),
              file_data: process_file(f)
            }
          end

          text_and_files_array += files_array
        else
          text_and_files_array.push(
            {
              type: "input_file",
              filename: filename_for(file),
              file_data: process_file(file)
            }
          )
        end

        messages.push(
          {
            role: role,
            content: text_and_files_array
          }
        )
      end
    end

    # Convenience: add a system message.
    def system(message)
      add(message, role: "system")
    end

    # Convenience: add a user message, optionally with attachments.
    def user(message, image: nil, images: nil, file: nil, files: nil)
      add(message, role: "user", image: image, images: images, file: file, files: files)
    end

    # Convenience: add an assistant message (e.g. when replaying history).
    def assistant(message, response: nil)
      add(message, role: "assistant", response: response)
    end

    # Send the conversation to the API. Returns the assistant's text, or a
    # symbolized Hash when a schema is configured (web_search takes
    # precedence over schema, matching the original branch order). The
    # reply is appended to #messages and #previous_response_id is updated
    # so the next call sends only the new portion of the transcript.
    def generate!
      response = create_response

      chat_response = Response.new(response)

      message = if web_search
        response.output.last.content.first.text
      elsif schema
        # filtering out refusals...
        json_response = extract_text_from_response(response)
        JSON.parse(json_response, symbolize_names: true)
      else
        response.output.last.content.first.text
      end

      assistant(message, response: chat_response)

      # Update previous_response_id for next request
      self.previous_response_id = response.id

      message
    end

    # Set the reasoning effort; accepts :low/:medium/:high (symbol or
    # string) or nil to disable.
    # @raise [ArgumentError] for any other value
    def reasoning_effort=(value)
      if value.nil?
        @reasoning_effort = nil
      else
        # Convert string to symbol if needed
        symbol_value = value.is_a?(String) ? value.to_sym : value

        if VALID_REASONING_EFFORTS.include?(symbol_value)
          @reasoning_effort = symbol_value
        else
          valid_values = VALID_REASONING_EFFORTS.map { |v| ":#{v} or \"#{v}\"" }.join(", ")
          raise ArgumentError, "Invalid reasoning_effort value: '#{value}'. Must be one of: #{valid_values}"
        end
      end
    end

    # Set the structured-output schema from a JSON string or a Hash. The
    # value is wrapped as {format: ...} unless it already carries a
    # :format/"format" key.
    # @raise [ArgumentError] when given a non-String/non-Hash, or a JSON
    #   string that does not parse to an object
    def schema=(value)
      if value.is_a?(String)
        parsed = JSON.parse(value, symbolize_names: true)
        # Guard: a JSON scalar/array would previously blow up with a
        # NoMethodError on #key? below; fail with the documented error.
        unless parsed.is_a?(Hash)
          raise ArgumentError, "Invalid schema value: '#{value}'. Must be a String containing JSON or a Hash."
        end

        @schema = parsed
        unless @schema.key?(:format) || @schema.key?("format")
          @schema = {format: @schema}
        end
      elsif value.is_a?(Hash)
        @schema = if value.key?(:format) || value.key?("format")
          value
        else
          {format: value}
        end
      else
        raise ArgumentError, "Invalid schema value: '#{value}'. Must be a String containing JSON or a Hash."
      end
    end

    # The most recently added message hash (nil when the transcript is empty).
    def last
      messages.last
    end

    def inspect
      "#<#{self.class.name} @messages=#{messages.inspect} @model=#{@model.inspect} @schema=#{@schema.inspect} @reasoning_effort=#{@reasoning_effort.inspect}>"
    end

    private

    # Custom exception class for input classification errors.
    class InputClassificationError < StandardError; end

    # Build the Responses API payload and issue the request.
    def create_response
      parameters = {
        model: model,
        tools: tools,
        text: schema,
        reasoning: {
          effort: reasoning_effort
        }.compact,
        previous_response_id: previous_response_id
      }.compact

      # Determine which messages to send based on whether we're using previous_response_id
      if previous_response_id
        # Find the index of the message with the matching response_id
        previous_response_index = messages.find_index { |m| m[:response]&.id == previous_response_id }

        if previous_response_index
          # Only send messages after the previous response
          new_messages = messages[(previous_response_index + 1)..]
          parameters[:input] = strip_responses(new_messages) unless new_messages.empty?
        else
          # If we can't find the previous response, send all messages
          parameters[:input] = strip_responses(messages)
        end
      else
        # Send full message history when not using previous_response_id
        parameters[:input] = strip_responses(messages)
      end

      # Drop only empty collections. A bare v.empty? would raise for values
      # that don't respond to it and would also drop an empty model string.
      parameters.delete_if { |_k, v| v.respond_to?(:empty?) && v.empty? }
      client.responses.create(**parameters)
    end

    # Classify +obj+ as :url, :file_path, or :file_like.
    # @raise [InputClassificationError] for anything unrecognizable
    def classify_obj(obj)
      if obj.is_a?(String)
        # Attempt to parse as a URL.
        begin
          uri = URI.parse(obj)
          if uri.is_a?(URI::HTTP) || uri.is_a?(URI::HTTPS)
            return :url
          end
        rescue URI::InvalidURIError
          # Not a valid URL; continue to check if it's a file path.
        end

        # Check if the string represents a local file path (must exist on disk).
        if File.exist?(obj)
          :file_path
        else
          raise InputClassificationError,
            "String provided is neither a valid URL (must start with http:// or https://) nor an existing file path on disk. Received value: #{obj.inspect}"
        end
      elsif obj.respond_to?(:read)
        # For non-String objects, check if it behaves like a file.
        :file_like
      else
        raise InputClassificationError,
          "Object provided is neither a String nor file-like (missing :read method). Received value: #{obj.inspect}"
      end
    end

    # Convert a URL/path/IO into something the API accepts: URLs pass
    # through untouched; paths and IO-like objects become base64 data URIs.
    def process_file(obj)
      case classify_obj(obj)
      when :url
        obj
      when :file_path
        file_path = obj

        # NOTE(review): unknown extensions yield an empty mime type here
        # (no fallback, unlike the :file_like branch) — confirm intended.
        mime_type = MIME::Types.type_for(file_path).first.to_s

        image_data = File.binread(file_path)

        base64_string = Base64.strict_encode64(image_data)

        "data:#{mime_type};base64,#{base64_string}"
      when :file_like
        filename = if obj.respond_to?(:path)
          obj.path
        elsif obj.respond_to?(:original_filename)
          obj.original_filename
        else
          "unknown"
        end

        mime_type = MIME::Types.type_for(filename).first.to_s
        mime_type = "image/jpeg" if mime_type.empty?

        file_data = obj.read
        # Leave the IO reusable for the caller.
        obj.rewind if obj.respond_to?(:rewind)

        base64_string = Base64.strict_encode64(file_data)

        "data:#{mime_type};base64,#{base64_string}"
      end
    end

    # Best-effort display name for a file attachment: path basename for
    # strings and path-bearing IOs, #original_filename for uploads, else a
    # generic placeholder.
    def filename_for(obj)
      if obj.is_a?(String)
        File.basename(obj)
      elsif obj.respond_to?(:path)
        File.basename(obj.path.to_s)
      elsif obj.respond_to?(:original_filename)
        obj.original_filename
      else
        "file"
      end
    end

    # Return API-ready copies of +messages+: the :response bookkeeping key
    # is removed and Hash content is serialized to JSON. This deliberately
    # does NOT mutate the stored transcript — the previous in-place version
    # deleted :response from @messages, which broke create_response's
    # previous_response_id lookup on subsequent requests.
    def strip_responses(messages)
      messages.map do |message|
        stripped = message.dup
        stripped.delete(:response)
        stripped[:content] = JSON.generate(stripped[:content]) if stripped[:content].is_a?(Hash)
        stripped
      end
    end

    # Tool definitions for the request (currently just web search).
    def tools
      tools_list = []
      if web_search
        tools_list << {type: "web_search_preview"}
      end
      tools_list
    end

    # First text output item across the response's output messages
    # (refusal items are a different class and are skipped).
    def extract_text_from_response(response)
      response.output.flat_map { it.content }.select { it.is_a?(OpenAI::Models::Responses::ResponseOutputText) }.first.text
    end
  end
end
-
1
module AI
-
1
class Response
-
1
attr_reader :id, :model, :usage, :total_tokens
-
-
1
def initialize(response)
-
@id = response.id
-
@model = response.model
-
@usage = response.usage.to_h.slice(:input_tokens, :output_tokens, :total_tokens)
-
@total_tokens = @usage[:total_tokens]
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
# Standard libraries.
-
1
require "English"
-
1
require "cgi"
-
1
require "date"
-
1
require "erb"
-
1
require "etc"
-
1
require "json"
-
1
require "net/http"
-
1
require "pathname"
-
1
require "rbconfig"
-
1
require "securerandom"
-
1
require "stringio"
-
1
require "time"
-
1
require "uri"
-
# We already ship the preferred sorbet manifests in the package itself.
# `tapioca` currently does not offer us a way to opt out of unnecessary compilation.
#
# Skip loading the rest of this file when invoked by tapioca's gem pipeline
# (but still load for its `dsl` sub-command).
invoked_by_tapioca =
  Object.const_defined?(:Tapioca) &&
  caller.chain([$PROGRAM_NAME]).chain(ARGV).any?(/tapioca/) &&
  ARGV.none?(/dsl/)
return if invoked_by_tapioca
-
-
# Gems.
-
1
require "connection_pool"
-
-
# Package files.
-
1
require_relative "openai/version"
-
1
require_relative "openai/internal/util"
-
1
require_relative "openai/internal/type/converter"
-
1
require_relative "openai/internal/type/unknown"
-
1
require_relative "openai/internal/type/boolean"
-
1
require_relative "openai/internal/type/file_input"
-
1
require_relative "openai/internal/type/enum"
-
1
require_relative "openai/internal/type/union"
-
1
require_relative "openai/internal/type/array_of"
-
1
require_relative "openai/internal/type/hash_of"
-
1
require_relative "openai/internal/type/base_model"
-
1
require_relative "openai/internal/type/base_page"
-
1
require_relative "openai/internal/type/base_stream"
-
1
require_relative "openai/internal/type/request_parameters"
-
1
require_relative "openai/internal"
-
1
require_relative "openai/request_options"
-
1
require_relative "openai/file_part"
-
1
require_relative "openai/errors"
-
1
require_relative "openai/internal/transport/base_client"
-
1
require_relative "openai/internal/transport/pooled_net_requester"
-
1
require_relative "openai/client"
-
1
require_relative "openai/internal/stream"
-
1
require_relative "openai/internal/cursor_page"
-
1
require_relative "openai/internal/page"
-
1
require_relative "openai/helpers/structured_output/json_schema_converter"
-
1
require_relative "openai/helpers/structured_output/boolean"
-
1
require_relative "openai/helpers/structured_output/enum_of"
-
1
require_relative "openai/helpers/structured_output/union_of"
-
1
require_relative "openai/helpers/structured_output/array_of"
-
1
require_relative "openai/helpers/structured_output/base_model"
-
1
require_relative "openai/helpers/structured_output/parsed_json"
-
1
require_relative "openai/helpers/structured_output"
-
1
require_relative "openai/structured_output"
-
1
require_relative "openai/models/reasoning_effort"
-
1
require_relative "openai/models/chat/chat_completion_message"
-
1
require_relative "openai/models/graders/score_model_grader"
-
1
require_relative "openai/models/graders/python_grader"
-
1
require_relative "openai/models/graders/text_similarity_grader"
-
1
require_relative "openai/models/fine_tuning/fine_tuning_job_wandb_integration_object"
-
1
require_relative "openai/models/responses/response_function_tool_call"
-
1
require_relative "openai/models/all_models"
-
1
require_relative "openai/models/audio/speech_create_params"
-
1
require_relative "openai/models/audio/speech_model"
-
1
require_relative "openai/models/audio/transcription"
-
1
require_relative "openai/models/audio/transcription_create_params"
-
1
require_relative "openai/models/audio/transcription_create_response"
-
1
require_relative "openai/models/audio/transcription_include"
-
1
require_relative "openai/models/audio/transcription_segment"
-
1
require_relative "openai/models/audio/transcription_stream_event"
-
1
require_relative "openai/models/audio/transcription_text_delta_event"
-
1
require_relative "openai/models/audio/transcription_text_done_event"
-
1
require_relative "openai/models/audio/transcription_verbose"
-
1
require_relative "openai/models/audio/transcription_word"
-
1
require_relative "openai/models/audio/translation"
-
1
require_relative "openai/models/audio/translation_create_params"
-
1
require_relative "openai/models/audio/translation_create_response"
-
1
require_relative "openai/models/audio/translation_verbose"
-
1
require_relative "openai/models/audio_model"
-
1
require_relative "openai/models/audio_response_format"
-
1
require_relative "openai/models/auto_file_chunking_strategy_param"
-
1
require_relative "openai/models/batch"
-
1
require_relative "openai/models/batch_cancel_params"
-
1
require_relative "openai/models/batch_create_params"
-
1
require_relative "openai/models/batch_error"
-
1
require_relative "openai/models/batch_list_params"
-
1
require_relative "openai/models/batch_request_counts"
-
1
require_relative "openai/models/batch_retrieve_params"
-
1
require_relative "openai/models/beta/assistant"
-
1
require_relative "openai/models/beta/assistant_create_params"
-
1
require_relative "openai/models/beta/assistant_deleted"
-
1
require_relative "openai/models/beta/assistant_delete_params"
-
1
require_relative "openai/models/beta/assistant_list_params"
-
1
require_relative "openai/models/beta/assistant_response_format_option"
-
1
require_relative "openai/models/beta/assistant_retrieve_params"
-
1
require_relative "openai/models/beta/assistant_stream_event"
-
1
require_relative "openai/models/beta/assistant_tool"
-
1
require_relative "openai/models/beta/assistant_tool_choice"
-
1
require_relative "openai/models/beta/assistant_tool_choice_function"
-
1
require_relative "openai/models/beta/assistant_tool_choice_option"
-
1
require_relative "openai/models/beta/assistant_update_params"
-
1
require_relative "openai/models/beta/code_interpreter_tool"
-
1
require_relative "openai/models/beta/file_search_tool"
-
1
require_relative "openai/models/beta/function_tool"
-
1
require_relative "openai/models/beta/message_stream_event"
-
1
require_relative "openai/models/beta/run_step_stream_event"
-
1
require_relative "openai/models/beta/run_stream_event"
-
1
require_relative "openai/models/beta/thread"
-
1
require_relative "openai/models/beta/thread_create_and_run_params"
-
1
require_relative "openai/models/beta/thread_create_params"
-
1
require_relative "openai/models/beta/thread_deleted"
-
1
require_relative "openai/models/beta/thread_delete_params"
-
1
require_relative "openai/models/beta/thread_retrieve_params"
-
1
require_relative "openai/models/beta/threads/annotation"
-
1
require_relative "openai/models/beta/threads/annotation_delta"
-
1
require_relative "openai/models/beta/threads/file_citation_annotation"
-
1
require_relative "openai/models/beta/threads/file_citation_delta_annotation"
-
1
require_relative "openai/models/beta/threads/file_path_annotation"
-
1
require_relative "openai/models/beta/threads/file_path_delta_annotation"
-
1
require_relative "openai/models/beta/threads/image_file"
-
1
require_relative "openai/models/beta/threads/image_file_content_block"
-
1
require_relative "openai/models/beta/threads/image_file_delta"
-
1
require_relative "openai/models/beta/threads/image_file_delta_block"
-
1
require_relative "openai/models/beta/threads/image_url"
-
1
require_relative "openai/models/beta/threads/image_url_content_block"
-
1
require_relative "openai/models/beta/threads/image_url_delta"
-
1
require_relative "openai/models/beta/threads/image_url_delta_block"
-
1
require_relative "openai/models/beta/threads/message"
-
1
require_relative "openai/models/beta/threads/message_content"
-
1
require_relative "openai/models/beta/threads/message_content_delta"
-
1
require_relative "openai/models/beta/threads/message_content_part_param"
-
1
require_relative "openai/models/beta/threads/message_create_params"
-
1
require_relative "openai/models/beta/threads/message_deleted"
-
1
require_relative "openai/models/beta/threads/message_delete_params"
-
1
require_relative "openai/models/beta/threads/message_delta"
-
1
require_relative "openai/models/beta/threads/message_delta_event"
-
1
require_relative "openai/models/beta/threads/message_list_params"
-
1
require_relative "openai/models/beta/threads/message_retrieve_params"
-
1
require_relative "openai/models/beta/threads/message_update_params"
-
1
require_relative "openai/models/beta/threads/refusal_content_block"
-
1
require_relative "openai/models/beta/threads/refusal_delta_block"
-
1
require_relative "openai/models/beta/threads/required_action_function_tool_call"
-
1
require_relative "openai/models/beta/threads/run"
-
1
require_relative "openai/models/beta/threads/run_cancel_params"
-
1
require_relative "openai/models/beta/threads/run_create_params"
-
1
require_relative "openai/models/beta/threads/run_list_params"
-
1
require_relative "openai/models/beta/threads/run_retrieve_params"
-
1
require_relative "openai/models/beta/threads/runs/code_interpreter_logs"
-
1
require_relative "openai/models/beta/threads/runs/code_interpreter_output_image"
-
1
require_relative "openai/models/beta/threads/runs/code_interpreter_tool_call"
-
1
require_relative "openai/models/beta/threads/runs/code_interpreter_tool_call_delta"
-
1
require_relative "openai/models/beta/threads/runs/file_search_tool_call"
-
1
require_relative "openai/models/beta/threads/runs/file_search_tool_call_delta"
-
1
require_relative "openai/models/beta/threads/runs/function_tool_call"
-
1
require_relative "openai/models/beta/threads/runs/function_tool_call_delta"
-
1
require_relative "openai/models/beta/threads/runs/message_creation_step_details"
-
1
require_relative "openai/models/beta/threads/runs/run_step"
-
1
require_relative "openai/models/beta/threads/runs/run_step_delta"
-
1
require_relative "openai/models/beta/threads/runs/run_step_delta_event"
-
1
require_relative "openai/models/beta/threads/runs/run_step_delta_message_delta"
-
1
require_relative "openai/models/beta/threads/runs/run_step_include"
-
1
require_relative "openai/models/beta/threads/runs/step_list_params"
-
1
require_relative "openai/models/beta/threads/runs/step_retrieve_params"
-
1
require_relative "openai/models/beta/threads/runs/tool_call"
-
1
require_relative "openai/models/beta/threads/runs/tool_call_delta"
-
1
require_relative "openai/models/beta/threads/runs/tool_call_delta_object"
-
1
require_relative "openai/models/beta/threads/runs/tool_calls_step_details"
-
1
require_relative "openai/models/beta/threads/run_status"
-
1
require_relative "openai/models/beta/threads/run_submit_tool_outputs_params"
-
1
require_relative "openai/models/beta/threads/run_update_params"
-
1
require_relative "openai/models/beta/threads/text"
-
1
require_relative "openai/models/beta/threads/text_content_block"
-
1
require_relative "openai/models/beta/threads/text_content_block_param"
-
1
require_relative "openai/models/beta/threads/text_delta"
-
1
require_relative "openai/models/beta/threads/text_delta_block"
-
1
require_relative "openai/models/beta/thread_stream_event"
-
1
require_relative "openai/models/beta/thread_update_params"
-
1
require_relative "openai/models/chat/chat_completion"
-
1
require_relative "openai/models/chat/chat_completion_assistant_message_param"
-
1
require_relative "openai/models/chat/chat_completion_audio"
-
1
require_relative "openai/models/chat/chat_completion_audio_param"
-
1
require_relative "openai/models/chat/chat_completion_chunk"
-
1
require_relative "openai/models/chat/chat_completion_content_part"
-
1
require_relative "openai/models/chat/chat_completion_content_part_image"
-
1
require_relative "openai/models/chat/chat_completion_content_part_input_audio"
-
1
require_relative "openai/models/chat/chat_completion_content_part_refusal"
-
1
require_relative "openai/models/chat/chat_completion_content_part_text"
-
1
require_relative "openai/models/chat/chat_completion_deleted"
-
1
require_relative "openai/models/chat/chat_completion_developer_message_param"
-
1
require_relative "openai/models/chat/chat_completion_function_call_option"
-
1
require_relative "openai/models/chat/chat_completion_function_message_param"
-
1
require_relative "openai/models/chat/chat_completion_message_param"
-
1
require_relative "openai/models/chat/chat_completion_message_tool_call"
-
1
require_relative "openai/models/chat/chat_completion_modality"
-
1
require_relative "openai/models/chat/chat_completion_named_tool_choice"
-
1
require_relative "openai/models/chat/chat_completion_prediction_content"
-
1
require_relative "openai/models/chat/chat_completion_reasoning_effort"
-
1
require_relative "openai/models/chat/chat_completion_role"
-
1
require_relative "openai/models/chat/chat_completion_store_message"
-
1
require_relative "openai/models/chat/chat_completion_stream_options"
-
1
require_relative "openai/models/chat/chat_completion_system_message_param"
-
1
require_relative "openai/models/chat/chat_completion_token_logprob"
-
1
require_relative "openai/models/chat/chat_completion_tool"
-
1
require_relative "openai/models/chat/chat_completion_tool_choice_option"
-
1
require_relative "openai/models/chat/chat_completion_tool_message_param"
-
1
require_relative "openai/models/chat/chat_completion_user_message_param"
-
1
require_relative "openai/models/chat/completion_create_params"
-
1
require_relative "openai/models/chat/completion_delete_params"
-
1
require_relative "openai/models/chat/completion_list_params"
-
1
require_relative "openai/models/chat/completion_retrieve_params"
-
1
require_relative "openai/models/chat/completions/message_list_params"
-
1
require_relative "openai/models/chat/completion_update_params"
-
1
require_relative "openai/models/chat_model"
-
1
require_relative "openai/models/comparison_filter"
-
1
require_relative "openai/models/completion"
-
1
require_relative "openai/models/completion_choice"
-
1
require_relative "openai/models/completion_create_params"
-
1
require_relative "openai/models/completion_usage"
-
1
require_relative "openai/models/compound_filter"
-
1
require_relative "openai/models/container_create_params"
-
1
require_relative "openai/models/container_create_response"
-
1
require_relative "openai/models/container_delete_params"
-
1
require_relative "openai/models/container_list_params"
-
1
require_relative "openai/models/container_list_response"
-
1
require_relative "openai/models/container_retrieve_params"
-
1
require_relative "openai/models/container_retrieve_response"
-
1
require_relative "openai/models/containers/file_create_params"
-
1
require_relative "openai/models/containers/file_create_response"
-
1
require_relative "openai/models/containers/file_delete_params"
-
1
require_relative "openai/models/containers/file_list_params"
-
1
require_relative "openai/models/containers/file_list_response"
-
1
require_relative "openai/models/containers/file_retrieve_params"
-
1
require_relative "openai/models/containers/file_retrieve_response"
-
1
require_relative "openai/models/containers/files/content_retrieve_params"
-
1
require_relative "openai/models/create_embedding_response"
-
1
require_relative "openai/models/embedding"
-
1
require_relative "openai/models/embedding_create_params"
-
1
require_relative "openai/models/embedding_model"
-
1
require_relative "openai/models/error_object"
-
1
require_relative "openai/models/eval_create_params"
-
1
require_relative "openai/models/eval_create_response"
-
1
require_relative "openai/models/eval_custom_data_source_config"
-
1
require_relative "openai/models/eval_delete_params"
-
1
require_relative "openai/models/eval_delete_response"
-
1
require_relative "openai/models/eval_list_params"
-
1
require_relative "openai/models/eval_list_response"
-
1
require_relative "openai/models/eval_retrieve_params"
-
1
require_relative "openai/models/eval_retrieve_response"
-
1
require_relative "openai/models/evals/create_eval_completions_run_data_source"
-
1
require_relative "openai/models/evals/create_eval_jsonl_run_data_source"
-
1
require_relative "openai/models/evals/eval_api_error"
-
1
require_relative "openai/models/evals/run_cancel_params"
-
1
require_relative "openai/models/evals/run_cancel_response"
-
1
require_relative "openai/models/evals/run_create_params"
-
1
require_relative "openai/models/evals/run_create_response"
-
1
require_relative "openai/models/evals/run_delete_params"
-
1
require_relative "openai/models/evals/run_delete_response"
-
1
require_relative "openai/models/evals/run_list_params"
-
1
require_relative "openai/models/evals/run_list_response"
-
1
require_relative "openai/models/evals/run_retrieve_params"
-
1
require_relative "openai/models/evals/run_retrieve_response"
-
1
require_relative "openai/models/evals/runs/output_item_list_params"
-
1
require_relative "openai/models/evals/runs/output_item_list_response"
-
1
require_relative "openai/models/evals/runs/output_item_retrieve_params"
-
1
require_relative "openai/models/evals/runs/output_item_retrieve_response"
-
1
require_relative "openai/models/eval_stored_completions_data_source_config"
-
1
require_relative "openai/models/eval_update_params"
-
1
require_relative "openai/models/eval_update_response"
-
1
require_relative "openai/models/file_chunking_strategy"
-
1
require_relative "openai/models/file_chunking_strategy_param"
-
1
require_relative "openai/models/file_content"
-
1
require_relative "openai/models/file_content_params"
-
1
require_relative "openai/models/file_create_params"
-
1
require_relative "openai/models/file_deleted"
-
1
require_relative "openai/models/file_delete_params"
-
1
require_relative "openai/models/file_list_params"
-
1
require_relative "openai/models/file_object"
-
1
require_relative "openai/models/file_purpose"
-
1
require_relative "openai/models/file_retrieve_params"
-
1
require_relative "openai/models/fine_tuning/alpha/grader_run_params"
-
1
require_relative "openai/models/fine_tuning/alpha/grader_run_response"
-
1
require_relative "openai/models/fine_tuning/alpha/grader_validate_params"
-
1
require_relative "openai/models/fine_tuning/alpha/grader_validate_response"
-
1
require_relative "openai/models/fine_tuning/checkpoints/permission_create_params"
-
1
require_relative "openai/models/fine_tuning/checkpoints/permission_create_response"
-
1
require_relative "openai/models/fine_tuning/checkpoints/permission_delete_params"
-
1
require_relative "openai/models/fine_tuning/checkpoints/permission_delete_response"
-
1
require_relative "openai/models/fine_tuning/checkpoints/permission_retrieve_params"
-
1
require_relative "openai/models/fine_tuning/checkpoints/permission_retrieve_response"
-
1
require_relative "openai/models/fine_tuning/dpo_hyperparameters"
-
1
require_relative "openai/models/fine_tuning/dpo_method"
-
1
require_relative "openai/models/fine_tuning/fine_tuning_job"
-
1
require_relative "openai/models/fine_tuning/fine_tuning_job_event"
-
1
require_relative "openai/models/fine_tuning/fine_tuning_job_integration"
-
1
require_relative "openai/models/fine_tuning/fine_tuning_job_wandb_integration"
-
1
require_relative "openai/models/fine_tuning/job_cancel_params"
-
1
require_relative "openai/models/fine_tuning/job_create_params"
-
1
require_relative "openai/models/fine_tuning/job_list_events_params"
-
1
require_relative "openai/models/fine_tuning/job_list_params"
-
1
require_relative "openai/models/fine_tuning/job_pause_params"
-
1
require_relative "openai/models/fine_tuning/job_resume_params"
-
1
require_relative "openai/models/fine_tuning/job_retrieve_params"
-
1
require_relative "openai/models/fine_tuning/jobs/checkpoint_list_params"
-
1
require_relative "openai/models/fine_tuning/jobs/fine_tuning_job_checkpoint"
-
1
require_relative "openai/models/fine_tuning/reinforcement_hyperparameters"
-
1
require_relative "openai/models/fine_tuning/reinforcement_method"
-
1
require_relative "openai/models/fine_tuning/supervised_hyperparameters"
-
1
require_relative "openai/models/fine_tuning/supervised_method"
-
1
require_relative "openai/models/function_definition"
-
1
require_relative "openai/models/function_parameters"
-
1
require_relative "openai/models/graders/label_model_grader"
-
1
require_relative "openai/models/graders/multi_grader"
-
1
require_relative "openai/models/graders/string_check_grader"
-
1
require_relative "openai/models/image"
-
1
require_relative "openai/models/image_create_variation_params"
-
1
require_relative "openai/models/image_edit_completed_event"
-
1
require_relative "openai/models/image_edit_params"
-
1
require_relative "openai/models/image_edit_partial_image_event"
-
1
require_relative "openai/models/image_edit_stream_event"
-
1
require_relative "openai/models/image_gen_completed_event"
-
1
require_relative "openai/models/image_generate_params"
-
1
require_relative "openai/models/image_gen_partial_image_event"
-
1
require_relative "openai/models/image_gen_stream_event"
-
1
require_relative "openai/models/image_model"
-
1
require_relative "openai/models/images_response"
-
1
require_relative "openai/models/metadata"
-
1
require_relative "openai/models/model"
-
1
require_relative "openai/models/model_deleted"
-
1
require_relative "openai/models/model_delete_params"
-
1
require_relative "openai/models/model_list_params"
-
1
require_relative "openai/models/model_retrieve_params"
-
1
require_relative "openai/models/moderation"
-
1
require_relative "openai/models/moderation_create_params"
-
1
require_relative "openai/models/moderation_create_response"
-
1
require_relative "openai/models/moderation_image_url_input"
-
1
require_relative "openai/models/moderation_model"
-
1
require_relative "openai/models/moderation_multi_modal_input"
-
1
require_relative "openai/models/moderation_text_input"
-
1
require_relative "openai/models/other_file_chunking_strategy_object"
-
1
require_relative "openai/models/reasoning"
-
1
require_relative "openai/models/response_format_json_object"
-
1
require_relative "openai/models/response_format_json_schema"
-
1
require_relative "openai/models/response_format_text"
-
1
require_relative "openai/models/responses/computer_tool"
-
1
require_relative "openai/models/responses/easy_input_message"
-
1
require_relative "openai/models/responses/file_search_tool"
-
1
require_relative "openai/models/responses/function_tool"
-
1
require_relative "openai/models/responses/input_item_list_params"
-
1
require_relative "openai/models/responses/response"
-
1
require_relative "openai/models/responses/response_audio_delta_event"
-
1
require_relative "openai/models/responses/response_audio_done_event"
-
1
require_relative "openai/models/responses/response_audio_transcript_delta_event"
-
1
require_relative "openai/models/responses/response_audio_transcript_done_event"
-
1
require_relative "openai/models/responses/response_cancel_params"
-
1
require_relative "openai/models/responses/response_code_interpreter_call_code_delta_event"
-
1
require_relative "openai/models/responses/response_code_interpreter_call_code_done_event"
-
1
require_relative "openai/models/responses/response_code_interpreter_call_completed_event"
-
1
require_relative "openai/models/responses/response_code_interpreter_call_in_progress_event"
-
1
require_relative "openai/models/responses/response_code_interpreter_call_interpreting_event"
-
1
require_relative "openai/models/responses/response_code_interpreter_tool_call"
-
1
require_relative "openai/models/responses/response_completed_event"
-
1
require_relative "openai/models/responses/response_computer_tool_call"
-
1
require_relative "openai/models/responses/response_computer_tool_call_output_item"
-
1
require_relative "openai/models/responses/response_computer_tool_call_output_screenshot"
-
1
require_relative "openai/models/responses/response_content"
-
1
require_relative "openai/models/responses/response_content_part_added_event"
-
1
require_relative "openai/models/responses/response_content_part_done_event"
-
1
require_relative "openai/models/responses/response_created_event"
-
1
require_relative "openai/models/responses/response_create_params"
-
1
require_relative "openai/models/responses/response_delete_params"
-
1
require_relative "openai/models/responses/response_error"
-
1
require_relative "openai/models/responses/response_error_event"
-
1
require_relative "openai/models/responses/response_failed_event"
-
1
require_relative "openai/models/responses/response_file_search_call_completed_event"
-
1
require_relative "openai/models/responses/response_file_search_call_in_progress_event"
-
1
require_relative "openai/models/responses/response_file_search_call_searching_event"
-
1
require_relative "openai/models/responses/response_file_search_tool_call"
-
1
require_relative "openai/models/responses/response_format_text_config"
-
1
require_relative "openai/models/responses/response_format_text_json_schema_config"
-
1
require_relative "openai/models/responses/response_function_call_arguments_delta_event"
-
1
require_relative "openai/models/responses/response_function_call_arguments_done_event"
-
1
require_relative "openai/models/responses/response_function_tool_call_item"
-
1
require_relative "openai/models/responses/response_function_tool_call_output_item"
-
1
require_relative "openai/models/responses/response_function_web_search"
-
1
require_relative "openai/models/responses/response_image_gen_call_completed_event"
-
1
require_relative "openai/models/responses/response_image_gen_call_generating_event"
-
1
require_relative "openai/models/responses/response_image_gen_call_in_progress_event"
-
1
require_relative "openai/models/responses/response_image_gen_call_partial_image_event"
-
1
require_relative "openai/models/responses/response_includable"
-
1
require_relative "openai/models/responses/response_incomplete_event"
-
1
require_relative "openai/models/responses/response_in_progress_event"
-
1
require_relative "openai/models/responses/response_input"
-
1
require_relative "openai/models/responses/response_input_audio"
-
1
require_relative "openai/models/responses/response_input_content"
-
1
require_relative "openai/models/responses/response_input_file"
-
1
require_relative "openai/models/responses/response_input_image"
-
1
require_relative "openai/models/responses/response_input_item"
-
1
require_relative "openai/models/responses/response_input_message_content_list"
-
1
require_relative "openai/models/responses/response_input_message_item"
-
1
require_relative "openai/models/responses/response_input_text"
-
1
require_relative "openai/models/responses/response_item"
-
1
require_relative "openai/models/responses/response_item_list"
-
1
require_relative "openai/models/responses/response_mcp_call_arguments_delta_event"
-
1
require_relative "openai/models/responses/response_mcp_call_arguments_done_event"
-
1
require_relative "openai/models/responses/response_mcp_call_completed_event"
-
1
require_relative "openai/models/responses/response_mcp_call_failed_event"
-
1
require_relative "openai/models/responses/response_mcp_call_in_progress_event"
-
1
require_relative "openai/models/responses/response_mcp_list_tools_completed_event"
-
1
require_relative "openai/models/responses/response_mcp_list_tools_failed_event"
-
1
require_relative "openai/models/responses/response_mcp_list_tools_in_progress_event"
-
1
require_relative "openai/models/responses/response_output_audio"
-
1
require_relative "openai/models/responses/response_output_item"
-
1
require_relative "openai/models/responses/response_output_item_added_event"
-
1
require_relative "openai/models/responses/response_output_item_done_event"
-
1
require_relative "openai/models/responses/response_output_message"
-
1
require_relative "openai/models/responses/response_output_refusal"
-
1
require_relative "openai/models/responses/response_output_text"
-
1
require_relative "openai/models/responses/response_output_text_annotation_added_event"
-
1
require_relative "openai/models/responses/response_prompt"
-
1
require_relative "openai/models/responses/response_queued_event"
-
1
require_relative "openai/models/responses/response_reasoning_item"
-
1
require_relative "openai/models/responses/response_reasoning_summary_delta_event"
-
1
require_relative "openai/models/responses/response_reasoning_summary_done_event"
-
1
require_relative "openai/models/responses/response_reasoning_summary_part_added_event"
-
1
require_relative "openai/models/responses/response_reasoning_summary_part_done_event"
-
1
require_relative "openai/models/responses/response_reasoning_summary_text_delta_event"
-
1
require_relative "openai/models/responses/response_reasoning_summary_text_done_event"
-
1
require_relative "openai/models/responses/response_refusal_delta_event"
-
1
require_relative "openai/models/responses/response_refusal_done_event"
-
1
require_relative "openai/models/responses/response_retrieve_params"
-
1
require_relative "openai/models/responses/response_status"
-
1
require_relative "openai/models/responses/response_stream_event"
-
1
require_relative "openai/models/responses/response_text_config"
-
1
require_relative "openai/models/responses/response_text_delta_event"
-
1
require_relative "openai/models/responses/response_text_done_event"
-
1
require_relative "openai/models/responses/response_usage"
-
1
require_relative "openai/models/responses/response_web_search_call_completed_event"
-
1
require_relative "openai/models/responses/response_web_search_call_in_progress_event"
-
1
require_relative "openai/models/responses/response_web_search_call_searching_event"
-
1
require_relative "openai/models/responses/tool"
-
1
require_relative "openai/models/responses/tool_choice_function"
-
1
require_relative "openai/models/responses/tool_choice_mcp"
-
1
require_relative "openai/models/responses/tool_choice_options"
-
1
require_relative "openai/models/responses/tool_choice_types"
-
1
require_relative "openai/models/responses/web_search_tool"
-
1
require_relative "openai/models/responses_model"
-
1
require_relative "openai/models/static_file_chunking_strategy"
-
1
require_relative "openai/models/static_file_chunking_strategy_object"
-
1
require_relative "openai/models/static_file_chunking_strategy_object_param"
-
1
require_relative "openai/models/upload"
-
1
require_relative "openai/models/upload_cancel_params"
-
1
require_relative "openai/models/upload_complete_params"
-
1
require_relative "openai/models/upload_create_params"
-
1
require_relative "openai/models/uploads/part_create_params"
-
1
require_relative "openai/models/uploads/upload_part"
-
1
require_relative "openai/models/vector_store"
-
1
require_relative "openai/models/vector_store_create_params"
-
1
require_relative "openai/models/vector_store_deleted"
-
1
require_relative "openai/models/vector_store_delete_params"
-
1
require_relative "openai/models/vector_store_list_params"
-
1
require_relative "openai/models/vector_store_retrieve_params"
-
1
require_relative "openai/models/vector_stores/file_batch_cancel_params"
-
1
require_relative "openai/models/vector_stores/file_batch_create_params"
-
1
require_relative "openai/models/vector_stores/file_batch_list_files_params"
-
1
require_relative "openai/models/vector_stores/file_batch_retrieve_params"
-
1
require_relative "openai/models/vector_stores/file_content_params"
-
1
require_relative "openai/models/vector_stores/file_content_response"
-
1
require_relative "openai/models/vector_stores/file_create_params"
-
1
require_relative "openai/models/vector_stores/file_delete_params"
-
1
require_relative "openai/models/vector_stores/file_list_params"
-
1
require_relative "openai/models/vector_stores/file_retrieve_params"
-
1
require_relative "openai/models/vector_stores/file_update_params"
-
1
require_relative "openai/models/vector_stores/vector_store_file"
-
1
require_relative "openai/models/vector_stores/vector_store_file_batch"
-
1
require_relative "openai/models/vector_stores/vector_store_file_deleted"
-
1
require_relative "openai/models/vector_store_search_params"
-
1
require_relative "openai/models/vector_store_search_response"
-
1
require_relative "openai/models/vector_store_update_params"
-
1
require_relative "openai/models/webhooks/batch_cancelled_webhook_event"
-
1
require_relative "openai/models/webhooks/batch_completed_webhook_event"
-
1
require_relative "openai/models/webhooks/batch_expired_webhook_event"
-
1
require_relative "openai/models/webhooks/batch_failed_webhook_event"
-
1
require_relative "openai/models/webhooks/eval_run_canceled_webhook_event"
-
1
require_relative "openai/models/webhooks/eval_run_failed_webhook_event"
-
1
require_relative "openai/models/webhooks/eval_run_succeeded_webhook_event"
-
1
require_relative "openai/models/webhooks/fine_tuning_job_cancelled_webhook_event"
-
1
require_relative "openai/models/webhooks/fine_tuning_job_failed_webhook_event"
-
1
require_relative "openai/models/webhooks/fine_tuning_job_succeeded_webhook_event"
-
1
require_relative "openai/models/webhooks/response_cancelled_webhook_event"
-
1
require_relative "openai/models/webhooks/response_completed_webhook_event"
-
1
require_relative "openai/models/webhooks/response_failed_webhook_event"
-
1
require_relative "openai/models/webhooks/response_incomplete_webhook_event"
-
1
require_relative "openai/models/webhooks/unwrap_webhook_event"
-
1
require_relative "openai/models/webhooks/webhook_unwrap_params"
-
1
require_relative "openai/models"
-
1
require_relative "openai/resources/audio"
-
1
require_relative "openai/resources/audio/speech"
-
1
require_relative "openai/resources/audio/transcriptions"
-
1
require_relative "openai/resources/audio/translations"
-
1
require_relative "openai/resources/batches"
-
1
require_relative "openai/resources/beta"
-
1
require_relative "openai/resources/beta/assistants"
-
1
require_relative "openai/resources/beta/threads"
-
1
require_relative "openai/resources/beta/threads/messages"
-
1
require_relative "openai/resources/beta/threads/runs"
-
1
require_relative "openai/resources/beta/threads/runs/steps"
-
1
require_relative "openai/resources/chat"
-
1
require_relative "openai/resources/chat/completions"
-
1
require_relative "openai/resources/chat/completions/messages"
-
1
require_relative "openai/resources/completions"
-
1
require_relative "openai/resources/containers"
-
1
require_relative "openai/resources/containers/files"
-
1
require_relative "openai/resources/containers/files/content"
-
1
require_relative "openai/resources/embeddings"
-
1
require_relative "openai/resources/evals"
-
1
require_relative "openai/resources/evals/runs"
-
1
require_relative "openai/resources/evals/runs/output_items"
-
1
require_relative "openai/resources/files"
-
1
require_relative "openai/resources/fine_tuning"
-
1
require_relative "openai/resources/fine_tuning/alpha"
-
1
require_relative "openai/resources/fine_tuning/alpha/graders"
-
1
require_relative "openai/resources/fine_tuning/checkpoints"
-
1
require_relative "openai/resources/fine_tuning/checkpoints/permissions"
-
1
require_relative "openai/resources/fine_tuning/jobs"
-
1
require_relative "openai/resources/fine_tuning/jobs/checkpoints"
-
1
require_relative "openai/resources/fine_tuning/methods"
-
1
require_relative "openai/resources/graders"
-
1
require_relative "openai/resources/graders/grader_models"
-
1
require_relative "openai/resources/images"
-
1
require_relative "openai/resources/models"
-
1
require_relative "openai/resources/moderations"
-
1
require_relative "openai/resources/responses"
-
1
require_relative "openai/resources/responses/input_items"
-
1
require_relative "openai/resources/uploads"
-
1
require_relative "openai/resources/uploads/parts"
-
1
require_relative "openai/resources/vector_stores"
-
1
require_relative "openai/resources/vector_stores/file_batches"
-
1
require_relative "openai/resources/vector_stores/files"
-
1
require_relative "openai/resources/webhooks"
-
1
require_relative "openai/helpers/streaming/events"
-
1
require_relative "openai/helpers/streaming/response_stream"
-
1
require_relative "openai/streaming"
-
# frozen_string_literal: true
-
-
1
module OpenAI
  # Top-level HTTP client for the OpenAI REST API. Holds the credentials and
  # connection settings, and exposes one resource wrapper per API surface
  # (chat, files, responses, vector stores, ...).
  class Client < OpenAI::Internal::Transport::BaseClient
    # Default max number of retries to attempt after a failed retryable request.
    DEFAULT_MAX_RETRIES = 2

    # Default per-request timeout.
    DEFAULT_TIMEOUT_IN_SECONDS = 600.0

    # Default initial retry delay in seconds.
    # Overall delay is calculated using exponential backoff + jitter.
    DEFAULT_INITIAL_RETRY_DELAY = 0.5

    # Default max retry delay in seconds.
    DEFAULT_MAX_RETRY_DELAY = 8.0

    # @return [String]
    attr_reader :api_key

    # @return [String, nil]
    attr_reader :organization

    # @return [String, nil]
    attr_reader :project

    # @return [String, nil]
    attr_reader :webhook_secret

    # @return [OpenAI::Resources::Completions]
    attr_reader :completions

    # @return [OpenAI::Resources::Chat]
    attr_reader :chat

    # @return [OpenAI::Resources::Embeddings]
    attr_reader :embeddings

    # @return [OpenAI::Resources::Files]
    attr_reader :files

    # @return [OpenAI::Resources::Images]
    attr_reader :images

    # @return [OpenAI::Resources::Audio]
    attr_reader :audio

    # @return [OpenAI::Resources::Moderations]
    attr_reader :moderations

    # @return [OpenAI::Resources::Models]
    attr_reader :models

    # @return [OpenAI::Resources::FineTuning]
    attr_reader :fine_tuning

    # @return [OpenAI::Resources::Graders]
    attr_reader :graders

    # @return [OpenAI::Resources::VectorStores]
    attr_reader :vector_stores

    # @return [OpenAI::Resources::Webhooks]
    attr_reader :webhooks

    # @return [OpenAI::Resources::Beta]
    attr_reader :beta

    # @return [OpenAI::Resources::Batches]
    attr_reader :batches

    # @return [OpenAI::Resources::Uploads]
    attr_reader :uploads

    # @return [OpenAI::Resources::Responses]
    attr_reader :responses

    # @return [OpenAI::Resources::Evals]
    attr_reader :evals

    # @return [OpenAI::Resources::Containers]
    attr_reader :containers

    # @api private
    #
    # Bearer-token authorization header for every request. Returns an empty
    # hash when no key is set — defensive only, since #initialize raises
    # ArgumentError before that state can normally occur.
    #
    # @return [Hash{String=>String}]
    private def auth_headers
      return {} if @api_key.nil?

      {"authorization" => "Bearer #{@api_key}"}
    end

    # Creates and returns a new client for interacting with the API.
    #
    # @param api_key [String, nil] Defaults to `ENV["OPENAI_API_KEY"]`
    #
    # @param organization [String, nil] Defaults to `ENV["OPENAI_ORG_ID"]`
    #
    # @param project [String, nil] Defaults to `ENV["OPENAI_PROJECT_ID"]`
    #
    # @param webhook_secret [String, nil] Defaults to `ENV["OPENAI_WEBHOOK_SECRET"]`
    #
    # @param base_url [String, nil] Override the default base URL for the API, e.g.,
    # `"https://api.example.com/v2/"`. Defaults to `ENV["OPENAI_BASE_URL"]`
    #
    # @param max_retries [Integer] Max number of retries to attempt after a failed retryable request.
    #
    # @param timeout [Float]
    #
    # @param initial_retry_delay [Float]
    #
    # @param max_retry_delay [Float]
    #
    # @raise [ArgumentError] when no API key is supplied (argument or env var)
    def initialize(
      api_key: ENV["OPENAI_API_KEY"],
      organization: ENV["OPENAI_ORG_ID"],
      project: ENV["OPENAI_PROJECT_ID"],
      webhook_secret: ENV["OPENAI_WEBHOOK_SECRET"],
      base_url: ENV["OPENAI_BASE_URL"],
      max_retries: self.class::DEFAULT_MAX_RETRIES,
      timeout: self.class::DEFAULT_TIMEOUT_IN_SECONDS,
      initial_retry_delay: self.class::DEFAULT_INITIAL_RETRY_DELAY,
      max_retry_delay: self.class::DEFAULT_MAX_RETRY_DELAY
    )
      base_url ||= "https://api.openai.com/v1"

      if api_key.nil?
        raise ArgumentError.new("api_key is required, and can be set via environ: \"OPENAI_API_KEY\"")
      end

      # Header values double as the ivar assignments for @organization and
      # @project. NOTE(review): values are nil when org/project are unset —
      # presumably the transport drops nil-valued headers; confirm in
      # Internal::Transport::BaseClient.
      headers = {
        "openai-organization" => (@organization = organization&.to_s),
        "openai-project" => (@project = project&.to_s)
      }

      @api_key = api_key.to_s
      @webhook_secret = webhook_secret&.to_s

      super(
        base_url: base_url,
        timeout: timeout,
        max_retries: max_retries,
        initial_retry_delay: initial_retry_delay,
        max_retry_delay: max_retry_delay,
        headers: headers
      )

      # One resource wrapper per API surface; each keeps a back-reference to
      # this client for issuing requests.
      @completions = OpenAI::Resources::Completions.new(client: self)
      @chat = OpenAI::Resources::Chat.new(client: self)
      @embeddings = OpenAI::Resources::Embeddings.new(client: self)
      @files = OpenAI::Resources::Files.new(client: self)
      @images = OpenAI::Resources::Images.new(client: self)
      @audio = OpenAI::Resources::Audio.new(client: self)
      @moderations = OpenAI::Resources::Moderations.new(client: self)
      @models = OpenAI::Resources::Models.new(client: self)
      @fine_tuning = OpenAI::Resources::FineTuning.new(client: self)
      @graders = OpenAI::Resources::Graders.new(client: self)
      @vector_stores = OpenAI::Resources::VectorStores.new(client: self)
      @webhooks = OpenAI::Resources::Webhooks.new(client: self)
      @beta = OpenAI::Resources::Beta.new(client: self)
      @batches = OpenAI::Resources::Batches.new(client: self)
      @uploads = OpenAI::Resources::Uploads.new(client: self)
      @responses = OpenAI::Resources::Responses.new(client: self)
      @evals = OpenAI::Resources::Evals.new(client: self)
      @containers = OpenAI::Resources::Containers.new(client: self)
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  # Exception hierarchy for the SDK. All errors descend from
  # OpenAI::Errors::Error (< StandardError), so `rescue OpenAI::Errors::Error`
  # catches everything the library raises.
  module Errors
    # Root error class for the SDK.
    class Error < StandardError
      # @!attribute cause
      #
      # @return [StandardError, nil]
    end

    # Raised when a webhook payload's signature fails verification.
    class InvalidWebhookSignatureError < OpenAI::Errors::Error
    end

    # Raised when an API response field cannot be converted to the declared
    # model type.
    class ConversionError < OpenAI::Errors::Error
      # Prefer the explicitly captured cause over Ruby's implicit one.
      #
      # @return [StandardError, nil]
      def cause = @cause.nil? ? super : @cause

      # @api private
      #
      # @param on [Class<StandardError>]
      # @param method [Symbol]
      # @param target [Object]
      # @param value [Object]
      # @param cause [StandardError, nil]
      def initialize(on:, method:, target:, value:, cause: nil)
        cls = on.name.split("::").last

        # Build a single-sentence diagnostic; the cause fragment is included
        # only when a cause was supplied (nil is filtered out).
        message = [
          "Failed to parse #{cls}.#{method} from #{value.class} to #{target.inspect}.",
          "To get the unparsed API response, use #{cls}[#{method.inspect}].",
          cause && "Cause: #{cause.message}"
        ].filter(&:itself).join(" ")

        @cause = cause
        super(message)
      end
    end

    # Base class for all request/response-level failures; carries the request
    # URL and, when available, HTTP status and parsed error body fields.
    class APIError < OpenAI::Errors::Error
      # @return [URI::Generic]
      attr_accessor :url

      # @return [Integer, nil]
      attr_accessor :status

      # @return [Object, nil]
      attr_accessor :body

      # @return [String, nil]
      attr_accessor :code

      # @return [String, nil]
      attr_accessor :param

      # @return [String, nil]
      attr_accessor :type

      # @api private
      #
      # @param url [URI::Generic]
      # @param status [Integer, nil]
      # @param body [Object, nil]
      # @param request [nil]
      # @param response [nil]
      # @param message [String, nil]
      def initialize(url:, status: nil, body: nil, request: nil, response: nil, message: nil)
        @url = url
        @status = status
        @body = body
        @request = request
        @response = response
        super(message)
      end
    end

    # Raised when the request never received an HTTP response (DNS, TCP, TLS
    # failures, ...). status/body are always nil for this class.
    class APIConnectionError < OpenAI::Errors::APIError
      # @!attribute status
      #
      # @return [nil]

      # @!attribute body
      #
      # @return [nil]

      # @!attribute code
      #
      # @return [nil]

      # @!attribute param
      #
      # @return [nil]

      # @!attribute type
      #
      # @return [nil]

      # @api private
      #
      # @param url [URI::Generic]
      # @param status [nil]
      # @param body [nil]
      # @param request [nil]
      # @param response [nil]
      # @param message [String, nil]
      def initialize(
        url:,
        status: nil,
        body: nil,
        request: nil,
        response: nil,
        message: "Connection error."
      )
        super
      end
    end

    # Connection error specialization for request timeouts.
    class APITimeoutError < OpenAI::Errors::APIConnectionError
      # @api private
      #
      # @param url [URI::Generic]
      # @param status [nil]
      # @param body [nil]
      # @param request [nil]
      # @param response [nil]
      # @param message [String, nil]
      def initialize(
        url:,
        status: nil,
        body: nil,
        request: nil,
        response: nil,
        message: "Request timed out."
      )
        super
      end
    end

    # Raised for any non-success HTTP status; `.for` picks the most specific
    # subclass based on the status code.
    class APIStatusError < OpenAI::Errors::APIError
      # @api private
      #
      # Factory: maps an HTTP status code to the matching error subclass
      # (falling back to APIStatusError itself for unrecognized codes).
      #
      # @param url [URI::Generic]
      # @param status [Integer]
      # @param body [Object, nil]
      # @param request [nil]
      # @param response [nil]
      # @param message [String, nil]
      #
      # @return [self]
      def self.for(url:, status:, body:, request:, response:, message: nil)
        kwargs = {
          url: url,
          status: status,
          body: body,
          request: request,
          response: response,
          message: message
        }

        case status
        in 400
          OpenAI::Errors::BadRequestError.new(**kwargs)
        in 401
          OpenAI::Errors::AuthenticationError.new(**kwargs)
        in 403
          OpenAI::Errors::PermissionDeniedError.new(**kwargs)
        in 404
          OpenAI::Errors::NotFoundError.new(**kwargs)
        in 409
          OpenAI::Errors::ConflictError.new(**kwargs)
        in 422
          OpenAI::Errors::UnprocessableEntityError.new(**kwargs)
        in 429
          OpenAI::Errors::RateLimitError.new(**kwargs)
        in (500..)
          OpenAI::Errors::InternalServerError.new(**kwargs)
        else
          OpenAI::Errors::APIStatusError.new(**kwargs)
        end
      end

      # @!parse
      #   # @return [Integer]
      #   attr_accessor :status

      # @!parse
      #   # @return [String, nil]
      #   attr_accessor :code

      # @!parse
      #   # @return [String, nil]
      #   attr_accessor :param

      # @!parse
      #   # @return [String, nil]
      #   attr_accessor :type

      # @api private
      #
      # @param url [URI::Generic]
      # @param status [Integer]
      # @param body [Object, nil]
      # @param request [nil]
      # @param response [nil]
      # @param message [String, nil]
      def initialize(url:, status:, body:, request:, response:, message: nil)
        # When the body has no :message, fall back to a hash describing the
        # failure (Util.dig's block supplies the default).
        message ||= OpenAI::Internal::Util.dig(body, :message) { {url: url.to_s, status: status, body: body} }
        # code/param/type are best-effort extractions from the error body.
        @code = OpenAI::Internal::Type::Converter.coerce(String, OpenAI::Internal::Util.dig(body, :code))
        @param = OpenAI::Internal::Type::Converter.coerce(String, OpenAI::Internal::Util.dig(body, :param))
        @type = OpenAI::Internal::Type::Converter.coerce(String, OpenAI::Internal::Util.dig(body, :type))
        super(
          url: url,
          status: status,
          body: body,
          request: request,
          response: response,
          message: message&.to_s
        )
      end
    end

    class BadRequestError < OpenAI::Errors::APIStatusError
      HTTP_STATUS = 400
    end

    class AuthenticationError < OpenAI::Errors::APIStatusError
      HTTP_STATUS = 401
    end

    class PermissionDeniedError < OpenAI::Errors::APIStatusError
      HTTP_STATUS = 403
    end

    class NotFoundError < OpenAI::Errors::APIStatusError
      HTTP_STATUS = 404
    end

    class ConflictError < OpenAI::Errors::APIStatusError
      HTTP_STATUS = 409
    end

    class UnprocessableEntityError < OpenAI::Errors::APIStatusError
      HTTP_STATUS = 422
    end

    class RateLimitError < OpenAI::Errors::APIStatusError
      HTTP_STATUS = 429
    end

    class InternalServerError < OpenAI::Errors::APIStatusError
      HTTP_STATUS = (500..)
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  # Wraps file-like content (a Pathname, an IO, a StringIO, or a raw String)
  # together with an optional filename and content type, for use in upload
  # request bodies.
  class FilePart
    # @return [Pathname, StringIO, IO, String]
    attr_reader :content

    # @return [String, nil]
    attr_reader :content_type

    # @return [String, nil]
    attr_reader :filename

    # @param content [Pathname, StringIO, IO, String]
    # @param filename [String, nil]
    # @param content_type [String, nil]
    def initialize(content, filename: nil, content_type: nil)
      @content = content
      @content_type = content_type
      # An explicit filename wins (reduced to its basename); otherwise a
      # Pathname supplies its own basename; anything else has no filename.
      @filename =
        if filename
          ::File.basename(filename)
        elsif content.is_a?(Pathname)
          content.basename.to_path
        end
    end

    # Serializes the file's raw bytes as a JSON string.
    #
    # @param args [Object]
    #
    # @return [String]
    def to_json(*args)
      read.to_json(*args)
    end

    # Serializes the file's raw bytes as YAML.
    #
    # @param args [Object]
    #
    # @return [String]
    def to_yaml(*args)
      read.to_yaml(*args)
    end

    # @api private
    #
    # Materializes the wrapped content into a String of raw bytes.
    #
    # @return [String]
    private def read
      case content
      in Pathname then content.read(binmode: true)
      in StringIO then content.string
      in IO then content.read
      in String then content
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Helpers
    module Streaming
      # Enriched event types emitted by the streaming helpers. Each subclasses
      # the corresponding raw API event model and declares one extra field
      # computed while the stream is accumulated.

      # Text delta plus `snapshot`: the full text accumulated so far for this
      # content part.
      class ResponseTextDeltaEvent < OpenAI::Models::Responses::ResponseTextDeltaEvent
        required :snapshot, String
      end

      # Text-done event plus `parsed`: the structured-output value when a text
      # format was configured, nil otherwise.
      class ResponseTextDoneEvent < OpenAI::Models::Responses::ResponseTextDoneEvent
        optional :parsed, Object
      end

      # Function-call arguments delta plus `snapshot`: the full argument string
      # accumulated so far.
      class ResponseFunctionCallArgumentsDeltaEvent < OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent
        required :snapshot, String
      end

      # Completed event carrying the final Response object.
      class ResponseCompletedEvent < OpenAI::Models::Responses::ResponseCompletedEvent
        required :response, OpenAI::Models::Responses::Response
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
require_relative "events"
-
-
1
module OpenAI
-
1
module Helpers
-
1
module Streaming
-
1
# Wraps a raw server-sent-event stream from the Responses API and yields
# enriched events (text snapshots, parsed structured output, and the final
# accumulated response).
class ResponseStream
  include OpenAI::Internal::Type::BaseStream

  # @param raw_stream [Enumerable] the underlying raw event stream
  # @param text_format [Class, nil] structured-output type used to parse final text
  # @param starting_after [Integer, nil] suppress events whose sequence_number
  #   is at or below this value (used when resuming a stream)
  def initialize(raw_stream:, text_format: nil, starting_after: nil)
    @text_format = text_format
    @starting_after = starting_after
    @raw_stream = raw_stream
    # NOTE: iterator is built lazily (chain_fused), so referencing @state
    # inside it is safe even though @state is assigned afterwards.
    @iterator = iterator
    @state = ResponseStreamState.new(
      text_format: text_format
    )
  end

  # Drains the stream, discarding events; returns self for chaining.
  def until_done
    each {} # rubocop:disable Lint/EmptyBlock
    self
  end

  # Returns a fused enumerator over just the text fragments (deltas).
  def text
    OpenAI::Internal::Util.chain_fused(@iterator) do |yielder|
      @iterator.each do |event|
        case event
        when OpenAI::Streaming::ResponseTextDeltaEvent
          yielder << event.delta
        end
      end
    end
  end

  # Drains the stream and returns the final accumulated response.
  #
  # @raise [RuntimeError] if the stream ends without a 'response.completed' event
  def get_final_response
    until_done
    response = @state.completed_response
    raise RuntimeError.new("Didn't receive a 'response.completed' event") unless response
    response
  end

  # Drains the stream and concatenates every :output_text part of every
  # :message output item into a single string.
  def get_output_text
    response = get_final_response
    text_parts = []

    response.output.each do |output|
      next unless output.type == :message

      output.content.each do |content|
        next unless content.type == :output_text
        text_parts << content.text
      end
    end

    text_parts.join
  end

  private

  # Lazily builds the fused iterator: feeds each raw event through the
  # accumulating state, then yields the resulting enriched events, applying
  # the starting_after sequence-number filter.
  def iterator
    @iterator ||= OpenAI::Internal::Util.chain_fused(@raw_stream) do |y|
      @raw_stream.each do |raw_event|
        events_to_yield = @state.handle_event(raw_event)
        events_to_yield.each do |event|
          if @starting_after.nil? || event.sequence_number > @starting_after
            y << event
          end
        end
      end
    end
  end
end
-
-
1
# Accumulator for a Responses API event stream. Maintains a running snapshot
# of the Response being built and converts raw events into the enriched
# OpenAI::Streaming::* event types.
class ResponseStreamState
  # The final response, set once a 'response.completed' event arrives.
  attr_reader :completed_response

  # @param text_format [Class, nil] structured-output type used to parse
  #   completed text parts
  def initialize(text_format:)
    @current_snapshot = nil
    @completed_response = nil
    @text_format = text_format
  end

  # Folds one raw event into the snapshot and returns the (possibly enriched)
  # events to surface to the consumer.
  #
  # @return [Array] events to yield downstream
  def handle_event(event)
    @current_snapshot = accumulate_event(
      event: event,
      current_snapshot: @current_snapshot
    )

    events_to_yield = []

    case event
    when OpenAI::Models::Responses::ResponseTextDeltaEvent
      # Attach the accumulated text so far as `snapshot`.
      output = @current_snapshot.output[event.output_index]
      assert_type(output, :message)

      content = output.content[event.content_index]
      assert_type(content, :output_text)

      events_to_yield << OpenAI::Streaming::ResponseTextDeltaEvent.new(
        content_index: event.content_index,
        delta: event.delta,
        item_id: event.item_id,
        output_index: event.output_index,
        sequence_number: event.sequence_number,
        type: event.type,
        snapshot: content.text
      )

    when OpenAI::Models::Responses::ResponseTextDoneEvent
      # Attach the structured-output parse of the completed text (or nil).
      output = @current_snapshot.output[event.output_index]
      assert_type(output, :message)

      content = output.content[event.content_index]
      assert_type(content, :output_text)

      parsed = parse_structured_text(content.text)

      events_to_yield << OpenAI::Streaming::ResponseTextDoneEvent.new(
        content_index: event.content_index,
        item_id: event.item_id,
        output_index: event.output_index,
        sequence_number: event.sequence_number,
        text: event.text,
        type: event.type,
        parsed: parsed
      )

    when OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent
      # Attach the accumulated argument string so far as `snapshot`.
      output = @current_snapshot.output[event.output_index]
      assert_type(output, :function_call)

      events_to_yield << OpenAI::Streaming::ResponseFunctionCallArgumentsDeltaEvent.new(
        delta: event.delta,
        item_id: event.item_id,
        output_index: event.output_index,
        sequence_number: event.sequence_number,
        type: event.type,
        snapshot: output.arguments
      )

    when OpenAI::Models::Responses::ResponseCompletedEvent
      events_to_yield << OpenAI::Streaming::ResponseCompletedEvent.new(
        sequence_number: event.sequence_number,
        type: event.type,
        response: event.response
      )

    else
      # Pass through other events unchanged.
      events_to_yield << event
    end

    events_to_yield
  end

  # Applies one event to the running Response snapshot and returns the
  # updated snapshot. The first event must be 'response.created'.
  def accumulate_event(event:, current_snapshot:)
    if current_snapshot.nil?
      unless event.is_a?(OpenAI::Models::Responses::ResponseCreatedEvent)
        raise "Expected first event to be response.created"
      end

      # Use the converter to create a new, isolated copy of the response object.
      # This ensures proper type validation and prevents shared object references.
      return OpenAI::Internal::Type::Converter.coerce(
        OpenAI::Models::Responses::Response,
        event.response
      )
    end

    case event
    when OpenAI::Models::Responses::ResponseOutputItemAddedEvent
      current_snapshot.output.push(event.item)

    when OpenAI::Models::Responses::ResponseContentPartAddedEvent
      output = current_snapshot.output[event.output_index]
      if output && output.type == :message
        output.content.push(event.part)
        current_snapshot.output[event.output_index] = output
      end

    when OpenAI::Models::Responses::ResponseTextDeltaEvent
      # Append the delta to the addressed text content part, if present.
      output = current_snapshot.output[event.output_index]
      if output && output.type == :message
        content = output.content[event.content_index]
        if content && content.type == :output_text
          content.text += event.delta
          output.content[event.content_index] = content
          current_snapshot.output[event.output_index] = output
        end
      end

    when OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent
      # Append the delta to the function call's argument string.
      output = current_snapshot.output[event.output_index]
      if output && output.type == :function_call
        output.arguments = (output.arguments || "") + event.delta
        current_snapshot.output[event.output_index] = output
      end

    when OpenAI::Models::Responses::ResponseCompletedEvent
      @completed_response = event.response
    end

    current_snapshot
  end

  private

  # Raises unless `object` exists and has the expected `type` tag; guards
  # against out-of-order or mis-indexed events.
  def assert_type(object, expected_type)
    return if object && object.type == expected_type
    actual_type = object ? object.type : "nil"
    raise "Invalid state: expected #{expected_type} but got #{actual_type}"
  end

  # Parses completed output text as JSON and coerces it into @text_format.
  # Returns nil when no text format is configured or text is nil.
  #
  # @raise [RuntimeError] when the text is not valid JSON
  def parse_structured_text(text)
    return nil unless @text_format && text

    begin
      parsed = JSON.parse(text, symbolize_names: true)
      OpenAI::Internal::Type::Converter.coerce(@text_format, parsed)
    rescue JSON::ParserError => e
      raise RuntimeError.new(
        "Failed to parse structured text as JSON for #{@text_format}: #{e.message}. " \
          "Raw text: #{text.inspect}"
      )
    end
  end
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Helpers
    # Helpers for the structured output API.
    #
    # see https://platform.openai.com/docs/guides/structured-outputs
    # see https://json-schema.org
    #
    # Based on the DSL in {OpenAI::Internal::Type}, but currently only supports the
    # limited subset of JSON schema types used in structured output APIs.
    #
    # Supported types: {NilClass} {String} {Symbol} {Integer} {Float} {OpenAI::Boolean}, {OpenAI::EnumOf}, {OpenAI::UnionOf}, {OpenAI::ArrayOf}, {OpenAI::BaseModel}
    #
    # This declaration is a namespace stub; the concrete converter classes are
    # defined in sibling files that reopen the module.
    module StructuredOutput
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Helpers
-
1
module StructuredOutput
-
# @generic Elem
-
#
-
# @example
-
# example = OpenAI::ArrayOf[Integer]
-
#
-
# @example
-
# example = OpenAI::ArrayOf[Integer, nil?: true, doc: "hi there!"]
-
1
# JSON-schema-aware array type for structured outputs.
#
# @generic Elem
#
# @example
#   example = OpenAI::ArrayOf[Integer]
#
# @example
#   example = OpenAI::ArrayOf[Integer, nil?: true, doc: "hi there!"]
class ArrayOf < OpenAI::Internal::Type::ArrayOf
  include OpenAI::Helpers::StructuredOutput::JsonSchemaConverter

  # Optional human-readable description, sourced from the `doc:` option.
  #
  # @return [String, nil]
  attr_reader :description

  # @api private
  #
  # Renders this array type as a JSON schema fragment, caching the definition
  # in `state` so repeated types can become refs.
  #
  # @param state [Hash{Symbol=>Object}]
  #
  # @option state [Hash{Object=>String}] :defs
  #
  # @option state [Array<String>] :path
  #
  # @return [Hash{Symbol=>Object}]
  def to_json_schema_inner(state:)
    converter = OpenAI::Helpers::StructuredOutput::JsonSchemaConverter
    converter.cache_def!(state, type: self) do
      # Record the array-element position in the schema path.
      state.fetch(:path) << "[]"
      item_schema = converter.to_json_schema_inner(item_type, state: state)
      item_schema = converter.to_nilable(item_schema) if nilable?

      schema = {type: "array", items: item_schema}
      schema = schema.update(description: description) unless description.nil?
      schema
    end
  end

  def initialize(type_info, spec = {})
    super
    # The first `doc:` value found in either the positional type info or the
    # spec hash becomes the schema description.
    doc_source = [type_info, spec].find { |arg| arg.is_a?(Hash) && arg[:doc] }
    @description = doc_source && doc_source[:doc]
  end
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Helpers
-
1
module StructuredOutput
-
# Represents a response from OpenAI's API where the model's output has been structured according to a schema predefined by the user.
-
#
-
# This class is specifically used when making requests with the `response_format` parameter set to use structured output (e.g., JSON).
-
#
-
# See {examples/structured_outputs_chat_completions.rb} for a complete example of use
-
1
class BaseModel < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Helpers::StructuredOutput::JsonSchemaConverter
-
-
1
class << self
-
# @return [Hash{Symbol=>Object}]
-
1
def to_json_schema = OpenAI::Helpers::StructuredOutput::JsonSchemaConverter.to_json_schema(self)
-
-
# @api private
-
#
-
# @param state [Hash{Symbol=>Object}]
-
#
-
# @option state [Hash{Object=>String}] :defs
-
#
-
# @option state [Array<String>] :path
-
#
-
# @return [Hash{Symbol=>Object}]
-
1
def to_json_schema_inner(state:)
-
OpenAI::Helpers::StructuredOutput::JsonSchemaConverter.cache_def!(state, type: self) do
-
path = state.fetch(:path)
-
properties = fields.to_h do |name, field|
-
type, nilable = field.fetch_values(:type, :nilable)
-
new_state = {**state, path: [*path, ".#{name}"]}
-
-
schema =
-
case type
-
in: 0
in {"$ref": String}
-
type
-
in: 0
in OpenAI::Helpers::StructuredOutput::JsonSchemaConverter
-
type.to_json_schema_inner(state: new_state).update(field.slice(:description))
-
else: 0
else
-
OpenAI::Helpers::StructuredOutput::JsonSchemaConverter.to_json_schema_inner(
-
type,
-
state: new_state
-
)
-
end
-
then: 0
else: 0
schema = OpenAI::Helpers::StructuredOutput::JsonSchemaConverter.to_nilable(schema) if nilable
-
[name, schema]
-
end
-
-
{
-
type: "object",
-
properties: properties,
-
required: properties.keys.map(&:to_s),
-
additionalProperties: false
-
}
-
end
-
end
-
end
-
-
1
class << self
-
1
def required(name_sym, type_info, spec = {})
-
super
-
-
doc = [type_info, spec].grep(Hash).filter_map { _1[:doc] }.first
-
else: 0
then: 0
known_fields.fetch(name_sym).update(description: doc) unless doc.nil?
-
end
-
-
1
def optional(...)
-
# rubocop:disable Layout/LineLength
-
message = "`optional` is not supported for structured output APIs, use `#required` with `nil?: true` instead"
-
# rubocop:enable Layout/LineLength
-
raise RuntimeError.new(message)
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Helpers
-
1
module StructuredOutput
-
# @abstract
-
#
-
# Ruby does not have a "boolean" Class, this is something for models to refer to.
-
1
class Boolean < OpenAI::Internal::Type::Boolean
-
1
extend OpenAI::Helpers::StructuredOutput::JsonSchemaConverter
-
# rubocop:disable Lint/UnusedMethodArgument
-
-
# @api private
-
#
-
# @param state [Hash{Symbol=>Object}]
-
#
-
# @option state [Hash{Object=>String}] :defs
-
#
-
# @option state [Array<String>] :path
-
#
-
# @return [Hash{Symbol=>Object}]
-
1
def self.to_json_schema_inner(state:) = {type: "boolean"}
-
-
# rubocop:enable Lint/UnusedMethodArgument
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Helpers
-
1
module StructuredOutput
-
# @generic Value
-
#
-
# @example
-
# example = OpenAI::EnumOf[:foo, :bar, :zoo]
-
#
-
# @example
-
# example = OpenAI::EnumOf[1, 2, 3]
-
1
class EnumOf
-
1
include OpenAI::Internal::Type::Enum
-
1
include OpenAI::Helpers::StructuredOutput::JsonSchemaConverter
-
-
# @api private
-
#
-
# @param state [Hash{Symbol=>Object}]
-
#
-
# @option state [Hash{Object=>String}] :defs
-
#
-
# @option state [Array<String>] :path
-
#
-
# @return [Hash{Symbol=>Object}]
-
1
def to_json_schema_inner(state:)
-
OpenAI::Helpers::StructuredOutput::JsonSchemaConverter.cache_def!(state, type: self) do
-
types = values.map do
-
else: 0
case _1
-
in: 0
in NilClass
-
"null"
-
in: 0
in true | false
-
"boolean"
-
in: 0
in Integer
-
"integer"
-
in: 0
in Float
-
"number"
-
in: 0
in Symbol
-
"string"
-
end
-
end
-
.uniq
-
-
{
-
then: 0
else: 0
type: types.length == 1 ? types.first : types,
-
then: 0
else: 0
enum: values.map { _1.is_a?(Symbol) ? _1.to_s : _1 }
-
}
-
end
-
end
-
-
1
private_class_method :new
-
-
1
def self.[](...) = new(...)
-
-
# @return [Array<generic<Value>>]
-
1
attr_reader :values
-
-
# @param values [Array<generic<Value>>]
-
1
then: 0
else: 0
def initialize(*values) = (@values = values.map { _1.is_a?(String) ? _1.to_sym : _1 })
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Helpers
-
1
module StructuredOutput
-
# To customize the JSON schema conversion for a type, implement the `JsonSchemaConverter` interface.
-
1
module JsonSchemaConverter
-
# @api private
-
1
POINTERS = Object.new.tap do
-
1
_1.define_singleton_method(:inspect) do
-
"#<#{OpenAI::Helpers::StructuredOutput::JsonSchemaConverter}::POINTERS>"
-
end
-
end.freeze
-
# @api private
-
1
NO_REF = Object.new.tap do
-
1
_1.define_singleton_method(:inspect) do
-
"#<#{OpenAI::Helpers::StructuredOutput::JsonSchemaConverter}::NO_REF>"
-
end
-
end.freeze
-
-
# rubocop:disable Lint/UnusedMethodArgument
-
-
# The exact JSON schema produced is subject to improvement between minor release versions.
-
#
-
# @param state [Hash{Symbol=>Object}]
-
#
-
# @option state [Hash{Object=>String}] :defs
-
#
-
# @option state [Array<String>] :path
-
#
-
# @return [Hash{Symbol=>Object}]
-
1
def to_json_schema_inner(state:) = (raise NotImplementedError)
-
-
# rubocop:enable Lint/UnusedMethodArgument
-
-
# Internal helpers methods.
-
1
class << self
-
# @api private
-
#
-
# @param schema [Hash{Symbol=>Object}]
-
#
-
# @return [Hash{Symbol=>Object}]
-
1
def to_nilable(schema)
-
null = "null"
-
else: 0
case schema
-
in: 0
in {"$ref": String}
-
{
-
anyOf: [
-
schema.update(OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::NO_REF => true),
-
{type: null}
-
]
-
}
-
in: 0
in {anyOf: schemas}
-
null = {type: null}
-
then: 0
else: 0
schemas.any? { _1 == null || _1 == {type: ["null"]} } ? schema : {anyOf: [*schemas, null]}
-
in: 0
in {type: String => type}
-
then: 0
else: 0
type == null ? schema : schema.update(type: [type, null])
-
in: 0
in {type: Array => types}
-
then: 0
else: 0
types.include?(null) ? schema : schema.update(type: [*types, null])
-
end
-
end
-
-
# @api private
-
#
-
# @param state [Hash{Symbol=>Object}]
-
#
-
# @option state [Hash{Object=>String}] :defs
-
#
-
# @option state [Array<String>] :path
-
#
-
# @param type [Object]
-
#
-
# @param blk [Proc]
-
#
-
1
def cache_def!(state, type:, &blk)
-
defs, path = state.fetch_values(:defs, :path)
-
then: 0
if (stored = defs[type])
-
pointers = stored.fetch(OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::POINTERS)
-
pointers.first.except(OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::NO_REF).tap do
-
pointers << _1
-
end
-
else: 0
else
-
ref_path = String.new
-
ref = {"$ref": ref_path}
-
stored = {
-
OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::POINTERS => [ref]
-
}
-
defs.store(type, stored)
-
schema = blk.call
-
ref_path.replace("#/$defs/#{path.join('/')}")
-
stored.update(schema)
-
ref
-
end
-
end
-
-
# @api private
-
#
-
# @param type [OpenAI::Helpers::StructuredOutput::JsonSchemaConverter, Class]
-
#
-
# @return [Hash{Symbol=>Object}]
-
1
def to_json_schema(type)
-
defs = {}
-
state = {defs: defs, path: []}
-
schema = OpenAI::Helpers::StructuredOutput::JsonSchemaConverter.to_json_schema_inner(
-
type,
-
state: state
-
)
-
reused_defs = {}
-
defs.each_value do |acc|
-
sch = acc.except(OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::POINTERS)
-
pointers = acc.fetch(OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::POINTERS)
-
-
no_refs, refs = pointers.partition do
-
_1.delete(OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::NO_REF)
-
end
-
-
case refs
-
in: 0
in [ref]
-
ref.replace(sch)
-
in: 0
in [_, ref, *]
-
else: 0
reused_defs.store(ref.fetch(:$ref), sch)
-
else
-
end
-
no_refs.each { _1.replace(sch) }
-
end
-
-
xformed = reused_defs.transform_keys { _1.delete_prefix("#/$defs/") }
-
then: 0
else: 0
xformed.empty? ? schema : {"$defs": xformed}.update(schema)
-
end
-
-
# @api private
-
#
-
# @param type [OpenAI::Helpers::StructuredOutput::JsonSchemaConverter, Class]
-
#
-
# @param state [Hash{Symbol=>Object}]
-
#
-
# @option state [Hash{Object=>String}] :defs
-
#
-
# @option state [Array<String>] :path
-
#
-
# @return [Hash{Symbol=>Object}]
-
1
def to_json_schema_inner(type, state:)
-
case type
-
in: 0
in {"$ref": String}
-
return type
-
in: 0
in OpenAI::Helpers::StructuredOutput::JsonSchemaConverter
-
return type.to_json_schema_inner(state: state)
-
in: 0
in Class
-
case type
-
in: 0
in -> { _1 <= NilClass }
-
return {type: "null"}
-
in: 0
in -> { _1 <= Integer }
-
return {type: "integer"}
-
in: 0
in -> { _1 <= Float }
-
return {type: "number"}
-
in: 0
in -> { _1 <= Symbol || _1 <= String }
-
else: 0
return {type: "string"}
-
else
-
end
-
in: 0
in _ if OpenAI::Internal::Util.primitive?(type)
-
else: 0
then: 0
else: 0
return {const: type.is_a?(Symbol) ? type.to_s : type}
-
else
-
end
-
-
models = %w[
-
NilClass
-
String
-
Symbol
-
Integer
-
Float
-
OpenAI::Boolean
-
OpenAI::ArrayOf
-
OpenAI::EnumOf
-
OpenAI::UnionOf
-
OpenAI::BaseModel
-
]
-
# rubocop:disable Layout/LineLength
-
message = "#{type} does not implement the #{OpenAI::Helpers::StructuredOutput::JsonSchemaConverter} interface. Please use one of the supported types: #{models}"
-
# rubocop:enable Layout/LineLength
-
raise ArgumentError.new(message)
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Helpers
-
1
module StructuredOutput
-
# @abstract
-
#
-
# Like OpenAI::Internal::Type::Unknown, but for parsed JSON values, which can be incomplete or malformed.
-
1
class ParsedJson < OpenAI::Internal::Type::Unknown
-
1
class << self
-
# @api private
-
#
-
# No coercion needed for Unknown type.
-
#
-
# @param value [Object]
-
#
-
# @param state [Hash{Symbol=>Object}] .
-
#
-
# @option state [Boolean] :translate_names
-
#
-
# @option state [Boolean] :strictness
-
#
-
# @option state [Hash{Symbol=>Object}] :exactness
-
#
-
# @option state [Class<StandardError>] :error
-
#
-
# @option state [Integer] :branched
-
#
-
# @return [Object]
-
1
def coerce(value, state:)
-
then: 0
else: 0
(state[:error] = value) if value.is_a?(StandardError)
-
-
super
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Helpers
-
1
module StructuredOutput
-
# @generic Member
-
#
-
# @example
-
# example = OpenAI::UnionOf[Float, OpenAI::ArrayOf[Integer]]
-
1
class UnionOf
-
1
include OpenAI::Internal::Type::Union
-
1
include OpenAI::Helpers::StructuredOutput::JsonSchemaConverter
-
-
# @api private
-
#
-
# @param state [Hash{Symbol=>Object}]
-
#
-
# @option state [Hash{Object=>String}] :defs
-
#
-
# @option state [Array<String>] :path
-
#
-
# @return [Hash{Symbol=>Object}]
-
1
def to_json_schema_inner(state:)
-
OpenAI::Helpers::StructuredOutput::JsonSchemaConverter.cache_def!(state, type: self) do
-
path = state.fetch(:path)
-
mergeable_keys = {[:anyOf] => 0, [:type] => 0}
-
schemas = variants.to_enum.with_index.map do
-
new_state = {**state, path: [*path, "?.#{_2}"]}
-
OpenAI::Helpers::StructuredOutput::JsonSchemaConverter.to_json_schema_inner(
-
_1,
-
state: new_state
-
)
-
end
-
-
schemas.each do |schema|
-
then: 0
else: 0
mergeable_keys.each_key { mergeable_keys[_1] += 1 if schema.keys == _1 }
-
end
-
mergeable = mergeable_keys.any? { _1.last == schemas.length }
-
then: 0
if mergeable
-
OpenAI::Internal::Util.deep_merge(*schemas, concat: true)
-
else: 0
else
-
{
-
anyOf: schemas.each do
-
then: 0
else: 0
if _1.key?(:$ref)
-
_1.update(OpenAI::Helpers::StructuredOutput::JsonSchemaConverter::NO_REF => true)
-
end
-
end
-
}
-
end
-
end
-
end
-
-
1
private_class_method :new
-
-
1
def self.[](...) = new(...)
-
-
# @param variants [Array<generic<Member>>]
-
1
def initialize(*variants)
-
case variants
-
3
in: 0
in [Symbol => d, Hash => vs]
-
discriminator(d)
-
vs.each do |k, v|
-
then: 0
else: 0
v.is_a?(Proc) ? variant(k, v) : variant(k, -> { v })
-
end
-
else: 3
else
-
3
variants.each do |v|
-
10
then: 0
else: 6
v.is_a?(Proc) ? variant(v) : variant(-> { v })
-
end
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Internal
-
1
extend OpenAI::Internal::Util::SorbetRuntimeSupport
-
-
OMIT =
-
1
Object.new.tap do
-
1
_1.define_singleton_method(:inspect) { "#<#{OpenAI::Internal}::OMIT>" }
-
end
-
.freeze
-
-
1
define_sorbet_constant!(:AnyHash) do
-
T.type_alias { T::Hash[Symbol, T.anything] }
-
end
-
1
define_sorbet_constant!(:FileInput) do
-
T.type_alias { T.any(Pathname, StringIO, IO, String, OpenAI::FilePart) }
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Internal
-
# @generic Elem
-
#
-
# @example
-
# if cursor_page.has_next?
-
# cursor_page = cursor_page.next_page
-
# end
-
#
-
# @example
-
# cursor_page.auto_paging_each do |completion|
-
# puts(completion)
-
# end
-
1
class CursorPage
-
1
include OpenAI::Internal::Type::BasePage
-
-
# @return [Array<generic<Elem>>, nil]
-
1
attr_accessor :data
-
-
# @return [Boolean]
-
1
attr_accessor :has_more
-
-
# @return [Boolean]
-
1
def next_page?
-
has_more
-
end
-
-
# @raise [OpenAI::HTTP::Error]
-
# @return [self]
-
1
def next_page
-
else: 0
then: 0
unless next_page?
-
message = "No more pages available. Please check #next_page? before calling ##{__method__}"
-
raise RuntimeError.new(message)
-
end
-
-
then: 0
else: 0
then: 0
else: 0
req = OpenAI::Internal::Util.deep_merge(@req, {query: {after: data&.last&.id}})
-
@client.request(req)
-
end
-
-
# @param blk [Proc]
-
#
-
# @yieldparam [generic<Elem>]
-
1
def auto_paging_each(&blk)
-
else: 0
then: 0
unless block_given?
-
raise ArgumentError.new("A block must be given to ##{__method__}")
-
end
-
-
page = self
-
loop do
-
then: 0
else: 0
page.data&.each(&blk)
-
-
else: 0
then: 0
break unless page.next_page?
-
page = page.next_page
-
end
-
end
-
-
# @api private
-
#
-
# @param client [OpenAI::Internal::Transport::BaseClient]
-
# @param req [Hash{Symbol=>Object}]
-
# @param headers [Hash{String=>String}, Net::HTTPHeader]
-
# @param page_data [Hash{Symbol=>Object}]
-
1
def initialize(client:, req:, headers:, page_data:)
-
super
-
-
case page_data
-
in: 0
in {data: Array => data}
-
else: 0
@data = data.map { OpenAI::Internal::Type::Converter.coerce(@model, _1) }
-
else
-
end
-
@has_more = page_data[:has_more]
-
end
-
-
# @api private
-
#
-
# @return [String]
-
1
def inspect
-
model = OpenAI::Internal::Type::Converter.inspect(@model, depth: 1)
-
-
"#<#{self.class}[#{model}]:0x#{object_id.to_s(16)} has_more=#{has_more.inspect}>"
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Internal
-
# @generic Elem
-
#
-
# @example
-
# if page.has_next?
-
# page = page.next_page
-
# end
-
#
-
# @example
-
# page.auto_paging_each do |model|
-
# puts(model)
-
# end
-
1
class Page
-
1
include OpenAI::Internal::Type::BasePage
-
-
# @return [Array<generic<Elem>>, nil]
-
1
attr_accessor :data
-
-
# @return [String]
-
1
attr_accessor :object
-
-
# @return [Boolean]
-
1
def next_page?
-
false
-
end
-
-
# @raise [OpenAI::HTTP::Error]
-
# @return [self]
-
1
def next_page
-
RuntimeError.new("No more pages available.")
-
end
-
-
# @param blk [Proc]
-
#
-
# @yieldparam [generic<Elem>]
-
1
def auto_paging_each(&blk)
-
else: 0
then: 0
unless block_given?
-
raise ArgumentError.new("A block must be given to ##{__method__}")
-
end
-
-
page = self
-
loop do
-
then: 0
else: 0
page.data&.each(&blk)
-
-
else: 0
then: 0
break unless page.next_page?
-
page = page.next_page
-
end
-
end
-
-
# @api private
-
#
-
# @param client [OpenAI::Internal::Transport::BaseClient]
-
# @param req [Hash{Symbol=>Object}]
-
# @param headers [Hash{String=>String}, Net::HTTPHeader]
-
# @param page_data [Array<Object>]
-
1
def initialize(client:, req:, headers:, page_data:)
-
super
-
-
case page_data
-
in: 0
in {data: Array => data}
-
else: 0
@data = data.map { OpenAI::Internal::Type::Converter.coerce(@model, _1) }
-
else
-
end
-
@object = page_data[:object]
-
end
-
-
# @api private
-
#
-
# @return [String]
-
1
def inspect
-
model = OpenAI::Internal::Type::Converter.inspect(@model, depth: 1)
-
-
"#<#{self.class}[#{model}]:0x#{object_id.to_s(16)} object=#{object.inspect}>"
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Internal
-
# @generic Elem
-
#
-
# @example
-
# stream.each do |event|
-
# puts(event)
-
# end
-
1
class Stream
-
1
include OpenAI::Internal::Type::BaseStream
-
-
# @api private
-
#
-
# @return [Enumerable<generic<Elem>>]
-
1
private def iterator
-
# rubocop:disable Metrics/BlockLength
-
@iterator ||= OpenAI::Internal::Util.chain_fused(@stream) do |y|
-
consume = false
-
-
@stream.each do |msg|
-
then: 0
else: 0
next if consume
-
-
case msg
-
in: 0
in {data: String => data} if data.start_with?("[DONE]")
-
consume = true
-
next
-
in: 0
in {data: String => data}
-
else: 0
case JSON.parse(data, symbolize_names: true)
-
in: 0
in {error: error}
-
message =
-
case error
-
in: 0
in String
-
error
-
in: 0
in {message: String => m}
-
m
-
else: 0
else
-
"An error occurred during streaming"
-
end
-
OpenAI::Errors::APIError.for(
-
url: @url,
-
status: @status,
-
body: body,
-
request: nil,
-
response: @response,
-
message: message
-
)
-
in: 0
in decoded
-
unwrapped = OpenAI::Internal::Util.dig(decoded, @unwrap)
-
y << OpenAI::Internal::Type::Converter.coerce(@model, unwrapped)
-
else: 0
end
-
else
-
end
-
end
-
end
-
# rubocop:enable Metrics/BlockLength
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Internal
-
1
module Transport
-
# @api private
-
#
-
# @abstract
-
1
class BaseClient
-
1
extend OpenAI::Internal::Util::SorbetRuntimeSupport
-
-
# from whatwg fetch spec
-
1
MAX_REDIRECTS = 20
-
-
# rubocop:disable Style/MutableConstant
-
PLATFORM_HEADERS =
-
{
-
1
"x-stainless-arch" => OpenAI::Internal::Util.arch,
-
"x-stainless-lang" => "ruby",
-
"x-stainless-os" => OpenAI::Internal::Util.os,
-
"x-stainless-package-version" => OpenAI::VERSION,
-
"x-stainless-runtime" => ::RUBY_ENGINE,
-
"x-stainless-runtime-version" => ::RUBY_ENGINE_VERSION
-
}
-
# rubocop:enable Style/MutableConstant
-
-
1
class << self
-
# @api private
-
#
-
# @param req [Hash{Symbol=>Object}]
-
#
-
# @raise [ArgumentError]
-
1
def validate!(req)
-
2
keys = [:method, :path, :query, :headers, :body, :unwrap, :page, :stream, :model, :options]
-
case req
-
2
in: 2
in Hash
-
2
req.each_key do |k|
-
12
else: 12
then: 0
unless keys.include?(k)
-
raise ArgumentError.new("Request `req` keys must be one of #{keys}, got #{k.inspect}")
-
end
-
end
-
else: 0
else
-
raise ArgumentError.new("Request `req` must be a Hash or RequestOptions, got #{req.inspect}")
-
end
-
end
-
-
# @api private
-
#
-
# @param status [Integer]
-
# @param headers [Hash{String=>String}, Net::HTTPHeader]
-
#
-
# @return [Boolean]
-
1
def should_retry?(status, headers:)
-
2
coerced = OpenAI::Internal::Util.coerce_boolean(headers["x-should-retry"])
-
case [coerced, status]
-
2
in: 0
in [true | false, _]
-
coerced
-
in [_, 408 | 409 | 429 | (500..)]
-
# retry on:
-
# 408: timeouts
-
# 409: locks
-
# 429: rate limits
-
in: 0
# 500+: unknown errors
-
true
-
else: 2
else
-
2
false
-
end
-
end
-
-
# @api private
-
#
-
# @param request [Hash{Symbol=>Object}] .
-
#
-
# @option request [Symbol] :method
-
#
-
# @option request [URI::Generic] :url
-
#
-
# @option request [Hash{String=>String}] :headers
-
#
-
# @option request [Object] :body
-
#
-
# @option request [Integer] :max_retries
-
#
-
# @option request [Float] :timeout
-
#
-
# @param status [Integer]
-
#
-
# @param response_headers [Hash{String=>String}, Net::HTTPHeader]
-
#
-
# @return [Hash{Symbol=>Object}]
-
1
def follow_redirect(request, status:, response_headers:)
-
method, url, headers = request.fetch_values(:method, :url, :headers)
-
location =
-
Kernel.then do
-
URI.join(url, response_headers["location"])
-
rescue ArgumentError
-
message = "Server responded with status #{status} but no valid location header."
-
raise OpenAI::Errors::APIConnectionError.new(
-
url: url,
-
response: response_headers,
-
message: message
-
)
-
end
-
-
request = {**request, url: location}
-
-
case [url.scheme, location.scheme]
-
in: 0
in ["https", "http"]
-
message = "Tried to redirect to a insecure URL"
-
raise OpenAI::Errors::APIConnectionError.new(
-
url: url,
-
response: response_headers,
-
message: message
-
)
-
else: 0
else
-
nil
-
end
-
-
# from whatwg fetch spec
-
case [status, method]
-
in: 0
in [301 | 302, :post] | [303, _]
-
drop = %w[content-encoding content-language content-length content-location content-type]
-
request = {
-
**request,
-
then: 0
else: 0
method: method == :head ? :head : :get,
-
headers: headers.except(*drop),
-
body: nil
-
else: 0
}
-
else
-
end
-
-
# from undici
-
then: 0
else: 0
if OpenAI::Internal::Util.uri_origin(url) != OpenAI::Internal::Util.uri_origin(location)
-
drop = %w[authorization cookie host proxy-authorization]
-
request = {**request, headers: request.fetch(:headers).except(*drop)}
-
end
-
-
request
-
end
-
-
# @api private
-
#
-
# @param status [Integer, OpenAI::Errors::APIConnectionError]
-
# @param stream [Enumerable<String>, nil]
-
1
def reap_connection!(status, stream:)
-
case status
-
2
in: 2
in (..199) | (300..499)
-
2
then: 2
else: 0
stream&.each { next }
-
in: 0
in OpenAI::Errors::APIConnectionError | (500..)
-
else: 0
OpenAI::Internal::Util.close_fused!(stream)
-
else
-
end
-
end
-
end
-
-
# @return [URI::Generic]
-
1
attr_reader :base_url
-
-
# @return [Float]
-
1
attr_reader :timeout
-
-
# @return [Integer]
-
1
attr_reader :max_retries
-
-
# @return [Float]
-
1
attr_reader :initial_retry_delay
-
-
# @return [Float]
-
1
attr_reader :max_retry_delay
-
-
# @return [Hash{String=>String}]
-
1
attr_reader :headers
-
-
# @return [String, nil]
-
1
attr_reader :idempotency_header
-
-
# @api private
-
# @return [OpenAI::Internal::Transport::PooledNetRequester]
-
1
attr_reader :requester
-
-
# @api private
-
#
-
# @param base_url [String]
-
# @param timeout [Float]
-
# @param max_retries [Integer]
-
# @param initial_retry_delay [Float]
-
# @param max_retry_delay [Float]
-
# @param headers [Hash{String=>String, Integer, Array<String, Integer, nil>, nil}]
-
# @param idempotency_header [String, nil]
-
1
def initialize(
-
base_url:,
-
timeout: 0.0,
-
max_retries: 0,
-
initial_retry_delay: 0.0,
-
max_retry_delay: 0.0,
-
headers: {},
-
idempotency_header: nil
-
)
-
2
@requester = OpenAI::Internal::Transport::PooledNetRequester.new
-
2
@headers = OpenAI::Internal::Util.normalized_headers(
-
self.class::PLATFORM_HEADERS,
-
{
-
"accept" => "application/json",
-
"content-type" => "application/json"
-
},
-
headers
-
)
-
2
@base_url_components = OpenAI::Internal::Util.parse_uri(base_url)
-
2
@base_url = OpenAI::Internal::Util.unparse_uri(@base_url_components)
-
2
then: 0
else: 2
then: 0
else: 2
@idempotency_header = idempotency_header&.to_s&.downcase
-
2
@timeout = timeout
-
2
@max_retries = max_retries
-
2
@initial_retry_delay = initial_retry_delay
-
2
@max_retry_delay = max_retry_delay
-
end
-
-
# @api private
-
#
-
# @return [Hash{String=>String}]
-
1
private def auth_headers = {}
-
-
# @api private
-
#
-
# @return [String]
-
1
private def generate_idempotency_key = "stainless-ruby-retry-#{SecureRandom.uuid}"
-
-
# @api private
-
#
-
# @param req [Hash{Symbol=>Object}] .
-
#
-
# @option req [Symbol] :method
-
#
-
# @option req [String, Array<String>] :path
-
#
-
# @option req [Hash{String=>Array<String>, String, nil}, nil] :query
-
#
-
# @option req [Hash{String=>String, Integer, Array<String, Integer, nil>, nil}, nil] :headers
-
#
-
# @option req [Object, nil] :body
-
#
-
# @option req [Symbol, Integer, Array<Symbol, Integer>, Proc, nil] :unwrap
-
#
-
# @option req [Class<OpenAI::Internal::Type::BasePage>, nil] :page
-
#
-
# @option req [Class<OpenAI::Internal::Type::BaseStream>, nil] :stream
-
#
-
# @option req [OpenAI::Internal::Type::Converter, Class, nil] :model
-
#
-
# @param opts [Hash{Symbol=>Object}] .
-
#
-
# @option opts [String, nil] :idempotency_key
-
#
-
# @option opts [Hash{String=>Array<String>, String, nil}, nil] :extra_query
-
#
-
# @option opts [Hash{String=>String, nil}, nil] :extra_headers
-
#
-
# @option opts [Object, nil] :extra_body
-
#
-
# @option opts [Integer, nil] :max_retries
-
#
-
# @option opts [Float, nil] :timeout
-
#
-
# @return [Hash{Symbol=>Object}]
-
1
private def build_request(req, opts)
-
2
method, uninterpolated_path = req.fetch_values(:method, :path)
-
-
2
path = OpenAI::Internal::Util.interpolate_path(uninterpolated_path)
-
-
2
query = OpenAI::Internal::Util.deep_merge(req[:query].to_h, opts[:extra_query].to_h)
-
-
2
headers = OpenAI::Internal::Util.normalized_headers(
-
@headers,
-
auth_headers,
-
req[:headers].to_h,
-
opts[:extra_headers].to_h
-
)
-
-
2
else: 2
if @idempotency_header &&
-
!headers.key?(@idempotency_header) &&
-
then: 0
(!Net::HTTP::IDEMPOTENT_METHODS_.include?(method.to_s.upcase) || opts.key?(:idempotency_key))
-
headers[@idempotency_header] = opts.fetch(:idempotency_key) { generate_idempotency_key }
-
end
-
-
2
else: 0
then: 2
unless headers.key?("x-stainless-retry-count")
-
2
headers["x-stainless-retry-count"] = "0"
-
end
-
-
2
timeout = opts.fetch(:timeout, @timeout).to_f.clamp(0..)
-
2
else: 0
then: 2
unless headers.key?("x-stainless-timeout") || timeout.zero?
-
2
headers["x-stainless-timeout"] = timeout.to_s
-
end
-
-
28
headers.reject! { |_, v| v.to_s.empty? }
-
-
body =
-
case method
-
2
in: 0
in :get | :head | :options | :trace
-
nil
-
else: 2
else
-
2
OpenAI::Internal::Util.deep_merge(*[req[:body], opts[:extra_body]].compact)
-
end
-
-
2
url = OpenAI::Internal::Util.join_parsed_uri(
-
@base_url_components,
-
{**req, path: path, query: query}
-
)
-
2
headers, encoded = OpenAI::Internal::Util.encode_content(headers, body)
-
{
-
2
method: method,
-
url: url,
-
headers: headers,
-
body: encoded,
-
max_retries: opts.fetch(:max_retries, @max_retries),
-
timeout: timeout
-
}
-
end
-
-
# @api private
-
#
-
# @param headers [Hash{String=>String}]
-
# @param retry_count [Integer]
-
#
-
# @return [Float]
-
1
private def retry_delay(headers, retry_count:)
-
# Non-standard extension
-
then: 0
else: 0
span = Float(headers["retry-after-ms"], exception: false)&.then { _1 / 1000 }
-
then: 0
else: 0
return span if span
-
-
retry_header = headers["retry-after"]
-
then: 0
else: 0
return span if (span = Float(retry_header, exception: false))
-
-
then: 0
else: 0
span = retry_header&.then do
-
Time.httpdate(_1) - Time.now
-
rescue ArgumentError
-
nil
-
end
-
then: 0
else: 0
return span if span
-
-
scale = retry_count**2
-
jitter = 1 - (0.25 * rand)
-
(@initial_retry_delay * scale * jitter).clamp(0, @max_retry_delay)
-
end
-
-
# @api private
-
#
-
# @param request [Hash{Symbol=>Object}] .
-
#
-
# @option request [Symbol] :method
-
#
-
# @option request [URI::Generic] :url
-
#
-
# @option request [Hash{String=>String}] :headers
-
#
-
# @option request [Object] :body
-
#
-
# @option request [Integer] :max_retries
-
#
-
# @option request [Float] :timeout
-
#
-
# @param redirect_count [Integer]
-
#
-
# @param retry_count [Integer]
-
#
-
# @param send_retry_header [Boolean]
-
#
-
# @raise [OpenAI::Errors::APIError]
-
# @return [Array(Integer, Net::HTTPResponse, Enumerable<String>)]
-
1
private def send_request(request, redirect_count:, retry_count:, send_retry_header:)
-
2
url, headers, max_retries, timeout = request.fetch_values(:url, :headers, :max_retries, :timeout)
-
2
input = {**request.except(:timeout), deadline: OpenAI::Internal::Util.monotonic_secs + timeout}
-
-
2
then: 2
else: 0
if send_retry_header
-
2
headers["x-stainless-retry-count"] = retry_count.to_s
-
end
-
-
begin
-
2
status, response, stream = @requester.execute(input)
-
rescue OpenAI::Errors::APIConnectionError => e
-
status = e
-
end
-
-
else: 0
case status
-
2
in: 0
in ..299
-
[status, response, stream]
-
in: 0
in 300..399 if redirect_count >= self.class::MAX_REDIRECTS
-
self.class.reap_connection!(status, stream: stream)
-
-
message = "Failed to complete the request within #{self.class::MAX_REDIRECTS} redirects."
-
raise OpenAI::Errors::APIConnectionError.new(url: url, response: response, message: message)
-
in: 0
in 300..399
-
self.class.reap_connection!(status, stream: stream)
-
-
request = self.class.follow_redirect(request, status: status, response_headers: response)
-
send_request(
-
request,
-
redirect_count: redirect_count + 1,
-
retry_count: retry_count,
-
send_retry_header: send_retry_header
-
)
-
in: 0
in OpenAI::Errors::APIConnectionError if retry_count >= max_retries
-
raise status
-
in: 2
in (400..) if retry_count >= max_retries || !self.class.should_retry?(status, headers: response)
-
2
decoded = Kernel.then do
-
2
OpenAI::Internal::Util.decode_content(response, stream: stream, suppress_error: true)
-
ensure
-
2
self.class.reap_connection!(status, stream: stream)
-
end
-
-
2
raise OpenAI::Errors::APIStatusError.for(
-
url: url,
-
status: status,
-
body: decoded,
-
request: nil,
-
response: response
-
)
-
in: 0
in (400..) | OpenAI::Errors::APIConnectionError
-
self.class.reap_connection!(status, stream: stream)
-
-
delay = retry_delay(response || {}, retry_count: retry_count)
-
sleep(delay)
-
-
send_request(
-
request,
-
redirect_count: redirect_count,
-
retry_count: retry_count + 1,
-
send_retry_header: send_retry_header
-
)
-
end
-
end
-
-
# Execute the request specified by `req`. This is the method that all resource
-
# methods call into.
-
#
-
# @overload request(method, path, query: {}, headers: {}, body: nil, unwrap: nil, page: nil, stream: nil, model: OpenAI::Internal::Type::Unknown, options: {})
-
#
-
# @param method [Symbol]
-
#
-
# @param path [String, Array<String>]
-
#
-
# @param query [Hash{String=>Array<String>, String, nil}, nil]
-
#
-
# @param headers [Hash{String=>String, Integer, Array<String, Integer, nil>, nil}, nil]
-
#
-
# @param body [Object, nil]
-
#
-
# @param unwrap [Symbol, Integer, Array<Symbol, Integer>, Proc, nil]
-
#
-
# @param page [Class<OpenAI::Internal::Type::BasePage>, nil]
-
#
-
# @param stream [Class<OpenAI::Internal::Type::BaseStream>, nil]
-
#
-
# @param model [OpenAI::Internal::Type::Converter, Class, nil]
-
#
-
# @param options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil] .
-
#
-
# @option options [String, nil] :idempotency_key
-
#
-
# @option options [Hash{String=>Array<String>, String, nil}, nil] :extra_query
-
#
-
# @option options [Hash{String=>String, nil}, nil] :extra_headers
-
#
-
# @option options [Object, nil] :extra_body
-
#
-
# @option options [Integer, nil] :max_retries
-
#
-
# @option options [Float, nil] :timeout
-
#
-
# @raise [OpenAI::Errors::APIError]
-
# @return [Object]
-
1
def request(req)
-
2
self.class.validate!(req)
-
2
model = req.fetch(:model) { OpenAI::Internal::Type::Unknown }
-
2
opts = req[:options].to_h
-
2
unwrap = req[:unwrap]
-
2
OpenAI::RequestOptions.validate!(opts)
-
2
request = build_request(req.except(:options), opts)
-
2
url = request.fetch(:url)
-
-
# Don't send the current retry count in the headers if the caller modified the header defaults.
-
2
send_retry_header = request.fetch(:headers)["x-stainless-retry-count"] == "0"
-
2
status, response, stream = send_request(
-
request,
-
redirect_count: 0,
-
retry_count: 0,
-
send_retry_header: send_retry_header
-
)
-
-
decoded = OpenAI::Internal::Util.decode_content(response, stream: stream)
-
case req
-
in: 0
in {stream: Class => st}
-
st.new(
-
model: model,
-
url: url,
-
status: status,
-
response: response,
-
unwrap: unwrap,
-
stream: decoded
-
)
-
in: 0
in {page: Class => page}
-
page.new(client: self, req: req, headers: response, page_data: decoded)
-
else: 0
else
-
unwrapped = OpenAI::Internal::Util.dig(decoded, unwrap)
-
OpenAI::Internal::Type::Converter.coerce(model, unwrapped)
-
end
-
end
-
-
# @api private
-
#
-
# @return [String]
-
1
# Compact debug representation of the client: identity plus the connection
# settings that matter for troubleshooting (base URL, retries, timeout).
# Deliberately omits credentials.
def inspect
  # rubocop:disable Layout/LineLength
  "#<#{self.class.name}:0x#{object_id.to_s(16)} base_url=#{@base_url} max_retries=#{@max_retries} timeout=#{@timeout}>"
  # rubocop:enable Layout/LineLength
end
-
-
1
# Sorbet type alias for the normalized per-request hash accepted by #request
# before it is expanded into a concrete transport request.
define_sorbet_constant!(:RequestComponents) do
  T.type_alias do
    {
      method: Symbol,
      path: T.any(String, T::Array[String]),
      query: T.nilable(T::Hash[String, T.nilable(T.any(T::Array[String], String))]),
      headers: T.nilable(
        T::Hash[String,
                T.nilable(
                  T.any(
                    String,
                    Integer,
                    T::Array[T.nilable(T.any(String, Integer))]
                  )
                )]
      ),
      body: T.nilable(T.anything),
      unwrap: T.nilable(
        T.any(
          Symbol,
          Integer,
          T::Array[T.any(Symbol, Integer)],
          T.proc.params(arg0: T.anything).returns(T.anything)
        )
      ),
      page: T.nilable(T::Class[OpenAI::Internal::Type::BasePage[OpenAI::Internal::Type::BaseModel]]),
      stream: T.nilable(
        T::Class[OpenAI::Internal::Type::BaseStream[T.anything,
                                                    OpenAI::Internal::Type::BaseModel]]
      ),
      model: T.nilable(OpenAI::Internal::Type::Converter::Input),
      options: T.nilable(OpenAI::RequestOptions::OrHash)
    }
  end
end
-
1
# Sorbet type alias for the fully-built request handed to the transport layer
# (the output of build_request): concrete URL, flattened headers, deadline info.
define_sorbet_constant!(:RequestInput) do
  T.type_alias do
    {
      method: Symbol,
      url: URI::Generic,
      headers: T::Hash[String, String],
      body: T.anything,
      max_retries: Integer,
      timeout: Float
    }
  end
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Internal
-
1
module Transport
-
# @api private
-
1
class PooledNetRequester
-
1
extend OpenAI::Internal::Util::SorbetRuntimeSupport
-
-
# from the golang stdlib
-
# https://github.com/golang/go/blob/c8eced8580028328fde7c03cbfcb720ce15b2358/src/net/http/transport.go#L49
-
1
KEEP_ALIVE_TIMEOUT = 30
-
-
1
DEFAULT_MAX_CONNECTIONS = [Etc.nprocessors, 99].max
-
-
1
class << self
-
# @api private
-
#
-
# @param url [URI::Generic]
-
#
-
# @return [Net::HTTP]
-
1
# @api private
#
# Builds (but does not start) a Net::HTTP connection for the given URL.
# SSL is enabled for https/wss; Net::HTTP-level retries are disabled because
# retry policy lives in the client layer.
#
# @param url [URI::Generic]
#
# @return [Net::HTTP]
def connect(url)
  # An explicit port always wins; otherwise fall back to the scheme's
  # well-known default. A nil port with an unrecognized scheme raises
  # NoMatchingPatternError, same as the strict pattern intends.
  port =
    case [url.port, url.scheme]
    in [Integer => explicit, _]
      explicit
    in [nil, "http" | "ws"]
      Net::HTTP.http_default_port
    in [nil, "https" | "wss"]
      Net::HTTP.https_default_port
    end

  conn = Net::HTTP.new(url.host, port)
  conn.use_ssl = %w[https wss].include?(url.scheme)
  conn.max_retries = 0
  conn
end
-
-
# @api private
-
#
-
# @param conn [Net::HTTP]
-
# @param deadline [Float]
-
1
# @api private
#
# Re-arms all four Net::HTTP socket timeouts so they expire at the shared
# absolute monotonic deadline. Called repeatedly between I/O phases so the
# overall request honors a single wall-clock budget.
#
# @param conn [Net::HTTP]
# @param deadline [Float] absolute monotonic time at which the request expires
def calibrate_socket_timeout(conn, deadline)
  remaining = deadline - OpenAI::Internal::Util.monotonic_secs
  conn.open_timeout = remaining
  conn.read_timeout = remaining
  conn.write_timeout = remaining
  conn.continue_timeout = remaining
end
-
-
# @api private
-
#
-
# @param request [Hash{Symbol=>Object}] .
-
#
-
# @option request [Symbol] :method
-
#
-
# @option request [URI::Generic] :url
-
#
-
# @option request [Hash{String=>String}] :headers
-
#
-
# @param blk [Proc]
-
#
-
# @yieldparam [String]
-
# @return [Array(Net::HTTPGenericRequest, Proc)]
-
1
# Translates the normalized request hash into a Net::HTTPGenericRequest with
# a streaming body, returning the request plus an optional closer proc for
# the body stream.
#
# @param request [Hash{Symbol=>Object}] with :method, :url, :headers, :body
# @param blk [Proc] invoked as bytes are consumed from the body stream
#
# @return [Array(Net::HTTPGenericRequest, Proc)]
def build_request(request, &blk)
  method, url, headers, body = request.fetch_values(:method, :url, :headers, :body)
  req = Net::HTTPGenericRequest.new(
    method.to_s.upcase,
    !body.nil?,
    method != :head, # HEAD responses carry no body
    URI(url.to_s) # ensure we construct a URI class of the right scheme
  )

  headers.each { req[_1] = _2 }

  # All body kinds are adapted to an IO-like stream; sized bodies advertise
  # content-length, unsized ones fall back to chunked transfer encoding.
  case body
  in nil
    nil
  in String
    req["content-length"] ||= body.bytesize.to_s unless req["transfer-encoding"]
    req.body_stream = OpenAI::Internal::Util::ReadIOAdapter.new(body, &blk)
  in StringIO
    req["content-length"] ||= body.size.to_s unless req["transfer-encoding"]
    req.body_stream = OpenAI::Internal::Util::ReadIOAdapter.new(body, &blk)
  in Pathname | IO | Enumerator
    req["transfer-encoding"] ||= "chunked" unless req["content-length"]
    req.body_stream = OpenAI::Internal::Util::ReadIOAdapter.new(body, &blk)
  end

  # The closer is nil when there is no body stream to clean up.
  [req, req.body_stream&.method(:close)]
end
-
end
-
-
# @api private
-
#
-
# @param url [URI::Generic]
-
# @param deadline [Float]
-
# @param blk [Proc]
-
#
-
# @raise [Timeout::Error]
-
# @yieldparam [Net::HTTP]
-
1
# Checks out a connection for the URL's origin from a per-origin pool,
# lazily creating the pool under the mutex, and yields the connection.
# The checkout itself must also complete within the remaining deadline.
#
# @param url [URI::Generic]
# @param deadline [Float]
# @param blk [Proc]
#
# @raise [Timeout::Error]
# @yieldparam [Net::HTTP]
private def with_pool(url, deadline:, &blk)
  origin = OpenAI::Internal::Util.uri_origin(url)
  timeout = deadline - OpenAI::Internal::Util.monotonic_secs
  pool =
    # Only pool creation is synchronized; ConnectionPool handles its own
    # checkout concurrency.
    @mutex.synchronize do
      @pools[origin] ||= ConnectionPool.new(size: @size) do
        self.class.connect(url)
      end
    end

  pool.with(timeout: timeout, &blk)
end
-
-
# @api private
-
#
-
# @param request [Hash{Symbol=>Object}] .
-
#
-
# @option request [Symbol] :method
-
#
-
# @option request [URI::Generic] :url
-
#
-
# @option request [Hash{String=>String}] :headers
-
#
-
# @option request [Object] :body
-
#
-
# @option request [Float] :deadline
-
#
-
# @return [Array(Integer, Net::HTTPResponse, Enumerable<String>)]
-
1
# Performs one HTTP exchange and returns [status, response, body-enumerable].
#
# The enumerator yields once with [conn, req, rsp] (consumed immediately
# below via enum.next) and afterwards yields raw binary body chunks. The
# `finished` flag lets the fused enumerable abort the read loop early;
# `eof` records whether the body was fully consumed, so the connection is
# only torn down (instead of returned to the pool warm) on partial reads.
def execute(request)
  url, deadline = request.fetch_values(:url, :deadline)

  req = nil
  eof = false
  finished = false
  closing = nil

  # rubocop:disable Metrics/BlockLength
  enum = Enumerator.new do |y|
    with_pool(url, deadline: deadline) do |conn|
      next if finished

      # The block re-arms socket timeouts as request-body bytes are streamed.
      req, closing = self.class.build_request(request) do
        self.class.calibrate_socket_timeout(conn, deadline)
      end

      self.class.calibrate_socket_timeout(conn, deadline)
      unless conn.started?
        conn.keep_alive_timeout = self.class::KEEP_ALIVE_TIMEOUT
        conn.start
      end

      self.class.calibrate_socket_timeout(conn, deadline)
      conn.request(req) do |rsp|
        # First yield: headers are available; body not yet consumed.
        y << [conn, req, rsp]
        break if finished

        rsp.read_body do |bytes|
          y << bytes.force_encoding(Encoding::BINARY)
          break if finished

          # Keep the deadline honest between chunks.
          self.class.calibrate_socket_timeout(conn, deadline)
        end
        eof = true
      end
    end
  rescue Timeout::Error
    raise OpenAI::Errors::APITimeoutError.new(url: url, request: req)
  rescue StandardError
    raise OpenAI::Errors::APIConnectionError.new(url: url, request: req)
  end
  # rubocop:enable Metrics/BlockLength

  conn, _, response = enum.next
  body = OpenAI::Internal::Util.fused_enum(enum, external: true) do
    # Close callback: stop iteration, drain one step so the request block
    # unwinds, then tear down the connection if the body wasn't fully read.
    finished = true
    tap do
      enum.next
    rescue StopIteration
      nil
    end
  ensure
    conn.finish if !eof && conn&.started?
    closing&.call
  end
  [Integer(response.code), response, body]
end
-
-
# @api private
-
#
-
# @param size [Integer]
-
1
# @param size [Integer] maximum connections per origin pool
def initialize(size: self.class::DEFAULT_MAX_CONNECTIONS)
  # Guards lazy creation of per-origin pools in #with_pool.
  @mutex = Mutex.new
  @size = size
  # origin string => ConnectionPool
  @pools = {}
end
-
-
1
# Sorbet type alias for the request hash consumed by #execute.
define_sorbet_constant!(:Request) do
  T.type_alias do
    {
      method: Symbol,
      url: URI::Generic,
      headers: T::Hash[String, String],
      body: T.anything,
      deadline: Float
    }
  end
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Internal
-
1
module Type
-
# @api private
-
#
-
# @abstract
-
#
-
# @generic Elem
-
#
-
# Array of items of a given type.
-
1
class ArrayOf
  include OpenAI::Internal::Type::Converter
  include OpenAI::Internal::Util::SorbetRuntimeSupport

  # Instances are only built through the [] sugar below.
  private_class_method :new

  # @overload [](type_info, spec = {})
  #
  # @param type_info [Hash{Symbol=>Object}, Proc, OpenAI::Internal::Type::Converter, Class]
  # @param spec [Hash{Symbol=>Object}] supports :const, :enum, :union, :"nil?"
  #
  # @return [self]
  def self.[](...) = new(...)

  # @api public
  #
  # True when `other` is an Array whose every element matches the item type
  # (Enumerable#all? applies === per element).
  #
  # @param other [Object]
  #
  # @return [Boolean]
  def ===(other) = other.is_a?(Array) && other.all?(item_type)

  # @api public
  #
  # Structural equality: same wrapper class, same nilability, same item type.
  #
  # @param other [Object]
  #
  # @return [Boolean]
  def ==(other)
    # rubocop:disable Layout/LineLength
    other.is_a?(OpenAI::Internal::Type::ArrayOf) && other.nilable? == nilable? && other.item_type == item_type
    # rubocop:enable Layout/LineLength
  end

  # @api public
  #
  # NOTE: hash intentionally ignores nilability; equal objects still hash
  # equally, unequal ones may collide (which is allowed).
  #
  # @return [Integer]
  def hash = [self.class, item_type].hash

  # @api private
  #
  # Coerces each element toward the item type, updating the exactness tally
  # in `state`. Non-Array input is returned unchanged and recorded as a miss.
  #
  # @param value [Array<Object>, Object]
  # @param state [Hash{Symbol=>Object}] coercion bookkeeping (see Converter.new_coerce_state)
  #
  # @return [Array<Object>, Object]
  def coerce(value, state:)
    exactness = state.fetch(:exactness)

    unless value.is_a?(Array)
      exactness[:no] += 1
      state[:error] = TypeError.new("#{value.class} can't be coerced into #{Array}")
      return value
    end

    target = item_type
    exactness[:yes] += 1
    value
      .map do |item|
        # nil elements are exact matches only when the element type is nilable.
        case [nilable?, item]
        in [true, nil]
          exactness[:yes] += 1
          nil
        else
          OpenAI::Internal::Type::Converter.coerce(target, item, state: state)
        end
      end
  end

  # @api private
  #
  # Dumps each element via the item type; non-Array values defer to the
  # generic Converter#dump.
  #
  # @param value [Array<Object>, Object]
  # @param state [Hash{Symbol=>Object}] supports :can_retry
  #
  # @return [Array<Object>, Object]
  def dump(value, state:)
    target = item_type
    if value.is_a?(Array)
      value.map do
        OpenAI::Internal::Type::Converter.dump(target, _1, state: state)
      end
    else
      super
    end
  end

  # @api private
  #
  # @return [Object]
  def to_sorbet_type
    T::Array[OpenAI::Internal::Util::SorbetRuntimeSupport.to_sorbet_type(item_type)]
  end

  # @api private
  #
  # Item type is stored as a thunk so mutually-recursive models can be
  # referenced before they are defined.
  #
  # @return [generic<Elem>]
  protected def item_type = @item_type_fn.call

  # @api private
  #
  # @return [Boolean]
  protected def nilable? = @nilable

  # @api private
  #
  # @param type_info [Hash{Symbol=>Object}, Proc, OpenAI::Internal::Type::Converter, Class]
  # @param spec [Hash{Symbol=>Object}] supports :const, :enum, :union, :"nil?"
  def initialize(type_info, spec = {})
    @item_type_fn = OpenAI::Internal::Type::Converter.type_info(type_info || spec)
    @nilable = spec.fetch(:nil?, false)
  end

  # @api private
  #
  # @param depth [Integer]
  #
  # @return [String]
  def inspect(depth: 0)
    items = OpenAI::Internal::Type::Converter.inspect(item_type, depth: depth.succ)

    "#{self.class}[#{[items, nilable? ? 'nil' : nil].compact.join(' | ')}]"
  end
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Internal
-
1
module Type
-
# @abstract
-
1
class BaseModel
-
1
extend OpenAI::Internal::Type::Converter
-
1
extend OpenAI::Internal::Util::SorbetRuntimeSupport
-
-
1
class << self
-
# @api private
-
#
-
# Assumes superclass fields are totally defined before fields are accessed /
-
# defined on subclasses.
-
#
-
# @param child [Class<OpenAI::Internal::Type::BaseModel>]
-
1
# @api private
#
# Copies the parent's field registry into each subclass so field
# declarations accumulate down the inheritance chain. Assumes superclass
# fields are fully defined before the subclass body runs.
#
# @param child [Class<OpenAI::Internal::Type::BaseModel>]
def inherited(child)
  super
  child.known_fields.replace(known_fields.dup)
end
-
-
# @api private
-
#
-
# @return [Hash{Symbol=>Hash{Symbol=>Object}}]
-
1
def known_fields = @known_fields ||= {}
-
-
# @api private
-
#
-
# @return [Hash{Symbol=>Hash{Symbol=>Object}}]
-
1
# @api private
#
# Materialized view of known_fields: each field's lazy :type_fn thunk is
# forced into a concrete :type. Recomputed on every call.
#
# @return [Hash{Symbol=>Hash{Symbol=>Object}}]
def fields
  known_fields.transform_values do |field|
    {**field.except(:type_fn), type: field.fetch(:type_fn).call}
  end
end
-
-
# @api private
-
#
-
# @param name_sym [Symbol]
-
#
-
# @param required [Boolean]
-
#
-
# @param type_info [Hash{Symbol=>Object}, Proc, OpenAI::Internal::Type::Converter, Class]
-
#
-
# @param spec [Hash{Symbol=>Object}] .
-
#
-
# @option spec [NilClass, TrueClass, FalseClass, Integer, Float, Symbol] :const
-
#
-
# @option spec [Proc] :enum
-
#
-
# @option spec [Proc] :union
-
#
-
# @option spec [Boolean] :"nil?"
-
1
# @api private
#
# Registers a field in known_fields and defines its reader and writer.
# The writer eagerly coerces and records success/error in @coerced; the
# reader lazily coerces raw @data, raising ConversionError on failure.
#
# @param name_sym [Symbol]
# @param required [Boolean]
# @param type_info [Hash{Symbol=>Object}, Proc, OpenAI::Internal::Type::Converter, Class]
# @param spec [Hash{Symbol=>Object}] supports :const, :enum, :union, :"nil?"
private def add_field(name_sym, required:, type_info:, spec:)
  type_fn, info =
    case type_info
    in Proc | OpenAI::Internal::Type::Converter | Class
      [OpenAI::Internal::Type::Converter.type_info({**spec, union: type_info}), spec]
    in Hash
      [OpenAI::Internal::Type::Converter.type_info(type_info), type_info]
    end

  setter = :"#{name_sym}="
  api_name = info.fetch(:api_name, name_sym)
  nilable = info.fetch(:nil?, false)
  # Constants only apply to required, non-nilable fields; OMIT is the sentinel
  # for "no constant".
  const = required && !nilable ? info.fetch(:const, OpenAI::Internal::OMIT) : OpenAI::Internal::OMIT

  # Redeclaration: drop the previously generated accessor pair first.
  [name_sym, setter].each { undef_method(_1) } if known_fields.key?(name_sym)

  known_fields[name_sym] =
    {
      mode: @mode,
      api_name: api_name,
      required: required,
      nilable: nilable,
      const: const,
      type_fn: type_fn
    }

  define_method(setter) do |value|
    target = type_fn.call
    state = OpenAI::Internal::Type::Converter.new_coerce_state(translate_names: false)
    coerced = OpenAI::Internal::Type::Converter.coerce(target, value, state: state)
    # Hash#store returns the stored value, so `error` is the coercion error
    # or `true` on success — never nil.
    error = @coerced.store(name_sym, state.fetch(:error) || true)
    stored =
      # NOTE(review): this pattern requires `error` to be nil, which the
      # store above can never produce — so the coerced value is never kept
      # and the raw `value` is always stored. Upstream uses `true` here;
      # verify whether the `nil` pattern is intentional.
      case [target, error]
      in [OpenAI::Internal::Type::Converter | Symbol, nil]
        coerced
      else
        value
      end
    @data.store(name_sym, stored)
  end

  # rubocop:disable Style/CaseEquality
  # rubocop:disable Metrics/BlockLength
  define_method(name_sym) do
    target = type_fn.call

    case @coerced[name_sym]
    in true | false if OpenAI::Internal::Type::Converter === target
      # Already coerced by the setter path; return the stored value as-is.
      @data.fetch(name_sym)
    in ::StandardError => e
      # A previous coercion failed; surface it with full context.
      raise OpenAI::Errors::ConversionError.new(
        on: self.class,
        method: __method__,
        target: target,
        value: @data.fetch(name_sym),
        cause: e
      )
    else
      # Lazy path: coerce raw data (or the declared constant) on first read.
      Kernel.then do
        value = @data.fetch(name_sym) { const == OpenAI::Internal::OMIT ? nil : const }
        state = OpenAI::Internal::Type::Converter.new_coerce_state(translate_names: false)
        if (nilable || !required) && value.nil?
          nil
        else
          OpenAI::Internal::Type::Converter.coerce(
            target, value, state: state
          )
        end
      rescue StandardError => e
        raise OpenAI::Errors::ConversionError.new(
          on: self.class,
          method: __method__,
          target: target,
          value: value,
          cause: e
        )
      end
    end
  end
  # rubocop:enable Metrics/BlockLength
  # rubocop:enable Style/CaseEquality
end
-
-
# @api private
-
#
-
# @param name_sym [Symbol]
-
#
-
# @param type_info [Hash{Symbol=>Object}, Proc, OpenAI::Internal::Type::Converter, Class]
-
#
-
# @param spec [Hash{Symbol=>Object}] .
-
#
-
# @option spec [NilClass, TrueClass, FalseClass, Integer, Float, Symbol] :const
-
#
-
# @option spec [Proc] :enum
-
#
-
# @option spec [Proc] :union
-
#
-
# @option spec [Boolean] :"nil?"
-
1
# @api private
#
# Declares a required field; see add_field for accessor generation details.
#
# @param name_sym [Symbol]
# @param type_info [Hash{Symbol=>Object}, Proc, OpenAI::Internal::Type::Converter, Class]
# @param spec [Hash{Symbol=>Object}] supports :const, :enum, :union, :"nil?"
def required(name_sym, type_info, spec = {})
  add_field(name_sym, required: true, type_info: type_info, spec: spec)
end
-
-
# @api private
-
#
-
# @param name_sym [Symbol]
-
#
-
# @param type_info [Hash{Symbol=>Object}, Proc, OpenAI::Internal::Type::Converter, Class]
-
#
-
# @param spec [Hash{Symbol=>Object}] .
-
#
-
# @option spec [NilClass, TrueClass, FalseClass, Integer, Float, Symbol] :const
-
#
-
# @option spec [Proc] :enum
-
#
-
# @option spec [Proc] :union
-
#
-
# @option spec [Boolean] :"nil?"
-
1
# @api private
#
# Declares an optional field; see add_field for accessor generation details.
#
# @param name_sym [Symbol]
# @param type_info [Hash{Symbol=>Object}, Proc, OpenAI::Internal::Type::Converter, Class]
# @param spec [Hash{Symbol=>Object}] supports :const, :enum, :union, :"nil?"
def optional(name_sym, type_info, spec = {})
  add_field(name_sym, required: false, type_info: type_info, spec: spec)
end
-
-
# @api private
-
#
-
# `request_only` attributes not excluded from `.#coerce` when receiving responses
-
# even if well behaved servers should not send them
-
#
-
# @param blk [Proc]
-
1
# @api private
#
# Fields declared inside the block are tagged mode :dump (request-only).
# `request_only` attributes not excluded from `.#coerce` when receiving responses
# even if well behaved servers should not send them
#
# @param blk [Proc]
private def request_only(&blk)
  @mode = :dump
  blk.call
ensure
  # Always reset so later declarations outside the block are untagged.
  @mode = nil
end
-
-
# @api private
-
#
-
# `response_only` attributes are omitted from `.#dump` when making requests
-
#
-
# @param blk [Proc]
-
1
# @api private
#
# Fields declared inside the block are tagged mode :coerce (response-only).
# `response_only` attributes are omitted from `.#dump` when making requests
#
# @param blk [Proc]
private def response_only(&blk)
  @mode = :coerce
  blk.call
ensure
  # Always reset so later declarations outside the block are untagged.
  @mode = nil
end
-
-
# @api public
-
#
-
# @param other [Object]
-
#
-
# @return [Boolean]
-
1
# @api public
#
# Structural equality for model classes: two model classes compare equal
# when both are BaseModel subclasses with identical field schemas.
#
# @param other [Object]
#
# @return [Boolean]
def ==(other)
  other.is_a?(Class) && other <= OpenAI::Internal::Type::BaseModel && other.fields == fields
end
-
-
# @api public
-
#
-
# @return [Integer]
-
1
def hash = fields.hash
-
end
-
-
# @api public
-
#
-
# @param other [Object]
-
#
-
# @return [Boolean]
-
1
def ==(other) = self.class == other.class && @data == other.to_h
-
-
# @api public
-
#
-
# @return [Integer]
-
1
def hash = [self.class, @data].hash
-
-
1
class << self
-
# @api private
-
#
-
# @param value [OpenAI::Internal::Type::BaseModel, Hash{Object=>Object}, Object]
-
#
-
# @param state [Hash{Symbol=>Object}] .
-
#
-
# @option state [Boolean] :translate_names
-
#
-
# @option state [Boolean] :strictness
-
#
-
# @option state [Hash{Symbol=>Object}] :exactness
-
#
-
# @option state [Class<StandardError>] :error
-
#
-
# @option state [Integer] :branched
-
#
-
# @return [self, Object]
-
1
# @api private
#
# Coerces a hash-like value into an instance of this model class, tallying
# exactness per field in `state`. Unknown keys are carried through verbatim.
#
# @param value [OpenAI::Internal::Type::BaseModel, Hash{Object=>Object}, Object]
# @param state [Hash{Symbol=>Object}] coercion bookkeeping (see Converter.new_coerce_state)
#
# @return [self, Object]
def coerce(value, state:)
  exactness = state.fetch(:exactness)

  # Fast path: already an instance of this model class. This method runs on
  # the singleton class, so the receiver (`self`) IS the model class; the
  # previous `value.is_a?(self.class)` tested `is_a?(Class)` and therefore
  # never matched model instances (and wrongly matched Class objects).
  if value.is_a?(self)
    exactness[:yes] += 1
    return value
  end

  unless (val = OpenAI::Internal::Util.coerce_hash(value)).is_a?(Hash)
    exactness[:no] += 1
    state[:error] = TypeError.new("#{value.class} can't be coerced into #{Hash}")
    return value
  end
  exactness[:yes] += 1

  keys = val.keys.to_set
  instance = new
  data = instance.to_h
  # Per-field coercion status hash shared with the generated accessors.
  viability = instance.instance_variable_get(:@coerced)

  # rubocop:disable Metrics/BlockLength
  fields.each do |name, field|
    mode, required, target = field.fetch_values(:mode, :required, :type)
    api_name, nilable, const = field.fetch_values(:api_name, :nilable, :const)
    src_name = state.fetch(:translate_names) ? api_name : name

    unless val.key?(src_name)
      # Missing required (non-const, non-request-only) fields count against
      # exactness; everything else missing is fine.
      if required && mode != :dump && const == OpenAI::Internal::OMIT
        exactness[nilable ? :maybe : :no] += 1
      else
        exactness[:yes] += 1
      end
      next
    end

    item = val.fetch(src_name)
    keys.delete(src_name)

    state[:error] = nil
    converted =
      if item.nil? && (nilable || !required)
        exactness[nilable ? :yes : :maybe] += 1
        nil
      else
        coerced = OpenAI::Internal::Type::Converter.coerce(target, item, state: state)
        # Only converter/symbol targets produce a value worth storing over
        # the raw item.
        case target
        in OpenAI::Internal::Type::Converter | Symbol
          coerced
        else
          item
        end
      end

    viability.store(name, state.fetch(:error) || true)
    data.store(name, converted)
  end
  # rubocop:enable Metrics/BlockLength

  # Preserve keys not declared on the model.
  keys.each { data.store(_1, val.fetch(_1)) }
  instance
end
-
-
# @api private
-
#
-
# @param value [self, Object]
-
#
-
# @param state [Hash{Symbol=>Object}] .
-
#
-
# @option state [Boolean] :can_retry
-
#
-
# @return [Hash{Object=>Object}, Object]
-
1
# @api private
#
# Serializes a model instance (or hash) into a plain request hash: known
# fields are dumped under their wire api_name, response-only fields are
# dropped, unknown keys pass through, and declared constants are filled in.
#
# @param value [self, Object]
# @param state [Hash{Symbol=>Object}] supports :can_retry
#
# @return [Hash{Object=>Object}, Object]
def dump(value, state:)
  unless (coerced = OpenAI::Internal::Util.coerce_hash(value)).is_a?(Hash)
    return super
  end

  acc = {}

  coerced.each do |key, val|
    name = key.is_a?(String) ? key.to_sym : key
    case (field = known_fields[name])
    in nil
      # Unknown key: dump generically and keep it.
      acc.store(name, super(val, state: state))
    else
      api_name, mode, type_fn = field.fetch_values(:api_name, :mode, :type_fn)
      case mode
      in :coerce
        # Response-only field: never serialized into requests.
        next
      else
        target = type_fn.call
        acc.store(api_name, OpenAI::Internal::Type::Converter.dump(target, val, state: state))
      end
    end
  end

  # Fill in declared constants that weren't explicitly set.
  known_fields.each_value do |field|
    api_name, mode, const = field.fetch_values(:api_name, :mode, :const)
    next if mode == :coerce || acc.key?(api_name) || const == OpenAI::Internal::OMIT
    acc.store(api_name, const)
  end

  acc
end
-
-
# @api private
-
#
-
# @return [Object]
-
1
def to_sorbet_type
-
self
-
end
-
end
-
-
1
class << self
-
# @api private
-
#
-
# @param model [OpenAI::Internal::Type::BaseModel]
-
# @param convert [Boolean]
-
#
-
# @return [Hash{Symbol=>Object}]
-
1
# @api private
#
# Recursively converts a model (and nested models/hashes/arrays) to plain
# hashes. With convert: true, known fields are read through their typed
# accessors (falling back to the raw value on conversion errors); with
# convert: false, raw stored data is used.
#
# @param model [OpenAI::Internal::Type::BaseModel]
# @param convert [Boolean]
#
# @return [Hash{Symbol=>Object}]
def recursively_to_h(model, convert:)
  rec = ->(x) do
    case x
    in OpenAI::Internal::Type::BaseModel
      if convert
        fields = x.class.known_fields
        x.to_h.to_h do |key, val|
          [key, rec.call(fields.key?(key) ? x.public_send(key) : val)]
        rescue OpenAI::Errors::ConversionError
          # Fall back to the raw stored value when typed access fails.
          [key, rec.call(val)]
        end
      else
        rec.call(x.to_h)
      end
    in Hash
      x.transform_values(&rec)
    in Array
      x.map(&rec)
    else
      x
    end
  end
  rec.call(model)
end
-
end
-
-
# @api public
-
#
-
# Returns the raw value associated with the given key, if found. Otherwise, nil is
-
# returned.
-
#
-
# It is valid to lookup keys that are not in the API spec, for example to access
-
# undocumented features. This method does not parse response data into
-
# higher-level types. Lookup by anything other than a Symbol is an ArgumentError.
-
#
-
# @param key [Symbol]
-
#
-
# @return [Object, nil]
-
1
# @api public
#
# Returns the raw value associated with the given key, if found; otherwise
# nil. Lookup of keys absent from the API spec is valid (e.g. undocumented
# features). No parsing into higher-level types is performed.
#
# @param key [Symbol]
#
# @return [Object, nil]
#
# @raise [ArgumentError] when the key is not a Symbol
def [](key)
  raise ArgumentError.new("Expected symbol key for lookup, got #{key.inspect}") unless key.instance_of?(Symbol)

  @data[key]
end
-
-
# @api public
-
#
-
# Returns a Hash of the data underlying this object. O(1)
-
#
-
# Keys are Symbols and values are the raw values from the response. The return
-
# value indicates which values were ever set on the object. i.e. there will be a
-
# key in this hash if they ever were, even if the set value was nil.
-
#
-
# This method is not recursive. The returned value is shared by the object, so it
-
# should not be mutated.
-
#
-
# @return [Hash{Symbol=>Object}]
-
1
def to_h = @data
-
-
1
alias_method :to_hash, :to_h
-
-
# @api public
-
#
-
# In addition to the behaviour of `#to_h`, this method will recursively call
-
# `#to_h` on nested models.
-
#
-
# @return [Hash{Symbol=>Object}]
-
1
def deep_to_h = self.class.recursively_to_h(@data, convert: false)
-
-
# @param keys [Array<Symbol>, nil]
-
#
-
# @return [Hash{Symbol=>Object}]
-
#
-
# @example
-
# # `comparison_filter` is a `OpenAI::ComparisonFilter`
-
# comparison_filter => {
-
# key: key,
-
# type: type,
-
# value: value
-
# }
-
1
# Pattern-matching support: returns {field => typed value} for the requested
# keys (or all known fields when keys is nil), silently skipping keys that
# are not declared fields.
#
# @param keys [Array<Symbol>, nil]
#
# @return [Hash{Symbol=>Object}]
#
# @example
#   # `comparison_filter` is a `OpenAI::ComparisonFilter`
#   comparison_filter => {
#     key: key,
#     type: type,
#     value: value
#   }
def deconstruct_keys(keys)
  (keys || self.class.known_fields.keys)
    .filter_map do |k|
      unless self.class.known_fields.key?(k)
        next
      end

      [k, public_send(k)]
    end
    .to_h
end
-
-
# @api public
-
#
-
# @param a [Object]
-
#
-
# @return [String]
-
1
def to_json(*a) = OpenAI::Internal::Type::Converter.dump(self.class, self).to_json(*a)
-
-
# @api public
-
#
-
# @param a [Object]
-
#
-
# @return [String]
-
1
def to_yaml(*a) = OpenAI::Internal::Type::Converter.dump(self.class, self).to_yaml(*a)
-
-
# Create a new instance of a model.
-
#
-
# @param data [Hash{Symbol=>Object}, self]
-
1
# Create a new instance of a model.
#
# Known fields are routed through their generated setters (which coerce and
# record status); unknown keys are stored raw and flagged as un-coerced.
#
# @param data [Hash{Symbol=>Object}, self]
def initialize(data = {})
  @data = {}
  @coerced = {}
  OpenAI::Internal::Util.coerce_hash!(data).each do
    if self.class.known_fields.key?(_1)
      public_send(:"#{_1}=", _2)
    else
      @data.store(_1, _2)
      @coerced.store(_1, false)
    end
  end
end
-
-
1
class << self
-
# @api private
-
#
-
# @param depth [Integer]
-
#
-
# @return [String]
-
1
# @api private
#
# Class-level inspect: renders the field schema at depth 0; deeper nesting
# falls back to the default representation to avoid unbounded recursion.
#
# @param depth [Integer]
#
# @return [String]
def inspect(depth: 0)
  return super() if depth.positive?

  depth = depth.succ
  deferred = fields.transform_values do |field|
    type, required, nilable = field.fetch_values(:type, :required, :nilable)
    inspected = [
      OpenAI::Internal::Type::Converter.inspect(type, depth: depth),
      !required || nilable ? "nil" : nil
    ].compact.join(" | ")
    # Wrap in a lambda whose #inspect returns the precomputed string, so the
    # enclosing Hash#inspect prints it without quotes.
    -> { inspected }.tap { _1.define_singleton_method(:inspect) { call } }
  end

  "#{name}[#{deferred.inspect}]"
end
-
end
-
-
# @api public
-
#
-
# @return [String]
-
1
def to_s = deep_to_h.to_s
-
-
# @api private
-
#
-
# @return [String]
-
1
# @api private
#
# Instance inspect: identity plus a recursively converted hash of the data.
#
# @return [String]
def inspect
  converted = self.class.recursively_to_h(self, convert: true)
  "#<#{self.class}:0x#{object_id.to_s(16)} #{converted}>"
end
-
-
1
define_sorbet_constant!(:KnownField) do
-
T.type_alias { {mode: T.nilable(Symbol), required: T::Boolean, nilable: T::Boolean} }
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Internal
-
1
module Type
-
# @api private
-
#
-
# @generic Elem
-
#
-
# This module provides a base implementation for paginated responses in the SDK.
-
1
module BasePage
  # rubocop:disable Lint/UnusedMethodArgument

  # @api public
  #
  # Whether another page is available. Subclass responsibility.
  #
  # @return [Boolean]
  def next_page? = (raise NotImplementedError)

  # @api public
  #
  # Fetches the next page. Subclass responsibility.
  #
  # @raise [OpenAI::Errors::APIError]
  # @return [self]
  def next_page = (raise NotImplementedError)

  # @api public
  #
  # Iterates every item across all pages. Subclass responsibility.
  #
  # @param blk [Proc]
  #
  # @yieldparam [generic<Elem>]
  # @return [void]
  def auto_paging_each(&blk) = (raise NotImplementedError)

  # @return [Enumerable<generic<Elem>>]
  def to_enum = super(:auto_paging_each)

  alias_method :enum_for, :to_enum

  # @api private
  #
  # Captures the client and originating request so next_page can re-issue it.
  # `headers` and `page_data` are unused here — presumably consumed by
  # concrete page subclasses via super; confirm against includers.
  #
  # @param client [OpenAI::Internal::Transport::BaseClient]
  # @param req [Hash{Symbol=>Object}]
  # @param headers [Hash{String=>String}, Net::HTTPHeader]
  # @param page_data [Object]
  def initialize(client:, req:, headers:, page_data:)
    @client = client
    @req = req
    @model = req.fetch(:model)
    super()
  end

  # rubocop:enable Lint/UnusedMethodArgument
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Internal
-
1
module Type
-
# @api private
-
#
-
# @generic Elem
-
#
-
# This module provides a base implementation for streaming responses in the SDK.
-
#
-
# @see https://rubyapi.org/3.2/o/enumerable
-
1
module BaseStream
  include Enumerable

  class << self
    # Attempt to close the underlying transport when the stream itself is garbage
    # collected.
    #
    # This should not be relied upon for resource clean up, as the garbage collector
    # is not guaranteed to run.
    #
    # @param stream [Enumerable<Object>]
    #
    # @return [Proc]
    #
    # @see https://rubyapi.org/3.2/o/objectspace#method-c-define_finalizer
    def defer_closing(stream) = ->(_id) { OpenAI::Internal::Util.close_fused!(stream) }
  end

  # @api public
  #
  # Closes the underlying fused iterator (and with it the transport).
  #
  # @return [void]
  def close = OpenAI::Internal::Util.close_fused!(@iterator)

  # @api private
  #
  # Produces the typed-event iterator. Subclass responsibility.
  #
  # @return [Enumerable<generic<Elem>>]
  private def iterator = (raise NotImplementedError)

  # @api public
  #
  # @param blk [Proc]
  #
  # @yieldparam [generic<Elem>]
  # @return [void]
  def each(&blk)
    unless block_given?
      raise ArgumentError.new("A block must be given to ##{__method__}")
    end
    @iterator.each(&blk)
  end

  # @api public
  #
  # @return [Enumerator<generic<Elem>>]
  def to_enum = @iterator

  alias_method :enum_for, :to_enum

  # @api private
  #
  # @param model [Class, OpenAI::Internal::Type::Converter]
  # @param url [URI::Generic]
  # @param status [Integer]
  # @param response [Net::HTTPResponse]
  # @param unwrap [Symbol, Integer, Array<Symbol, Integer>, Proc]
  # @param stream [Enumerable<Object>]
  def initialize(model:, url:, status:, response:, unwrap:, stream:)
    @model = model
    @url = url
    @status = status
    @response = response
    @unwrap = unwrap
    @stream = stream
    @iterator = iterator

    # Best-effort transport cleanup if the stream is GC'd without being closed.
    # The finalizer closes @stream, not self, to avoid keeping self alive.
    ObjectSpace.define_finalizer(self, OpenAI::Internal::Type::BaseStream.defer_closing(@stream))
  end

  # @api private
  #
  # @return [String]
  def inspect
    model = OpenAI::Internal::Type::Converter.inspect(@model, depth: 1)

    "#<#{self.class}[#{model}]:0x#{object_id.to_s(16)}>"
  end
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Internal
-
1
module Type
-
# @api private
-
#
-
# @abstract
-
#
-
# Ruby has no Boolean class; this is something for models to refer to.
-
1
# Ruby has no Boolean class; this is something for models to refer to.
class Boolean
  extend OpenAI::Internal::Type::Converter
  extend OpenAI::Internal::Util::SorbetRuntimeSupport

  # Never instantiated; used purely as a type token.
  private_class_method :new

  # @api public
  #
  # Matches exactly true or false (truthiness is not enough).
  #
  # @param other [Object]
  #
  # @return [Boolean]
  def self.===(other) = other == true || other == false

  # @api public
  #
  # @param other [Object]
  #
  # @return [Boolean]
  def self.==(other) = other.is_a?(Class) && other <= OpenAI::Internal::Type::Boolean

  class << self
    # @api private
    #
    # Coerce value to Boolean if possible, otherwise return the original value.
    # No actual conversion happens — only the exactness tally is updated.
    #
    # @param value [Boolean, Object]
    # @param state [Hash{Symbol=>Object}] coercion bookkeeping (see Converter.new_coerce_state)
    #
    # @return [Boolean, Object]
    def coerce(value, state:)
      state.fetch(:exactness)[value == true || value == false ? :yes : :no] += 1
      value
    end

    # @!method dump(value, state:)
    #   @api private
    #
    #   @param value [Boolean, Object]
    #
    #   @param state [Hash{Symbol=>Object}] .
    #
    #   @option state [Boolean] :can_retry
    #
    #   @return [Boolean, Object]

    # @api private
    #
    # @return [Object]
    def to_sorbet_type
      T::Boolean
    end
  end
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Internal
-
1
module Type
-
# @api private
-
1
module Converter
-
1
extend OpenAI::Internal::Util::SorbetRuntimeSupport
-
-
# rubocop:disable Lint/UnusedMethodArgument
-
-
# @api private
-
#
-
# @param value [Object]
-
#
-
# @param state [Hash{Symbol=>Object}] .
-
#
-
# @option state [Boolean] :translate_names
-
#
-
# @option state [Boolean] :strictness
-
#
-
# @option state [Hash{Symbol=>Object}] :exactness
-
#
-
# @option state [Class<StandardError>] :error
-
#
-
# @option state [Integer] :branched
-
#
-
# @return [Object]
-
1
def coerce(value, state:) = (raise NotImplementedError)
-
-
# @api private
-
#
-
# @param value [Object]
-
#
-
# @param state [Hash{Symbol=>Object}] .
-
#
-
# @option state [Boolean] :can_retry
-
#
-
# @return [Object]
-
1
# @api private
#
# Generic dump: recursively serializes containers, delegates models to their
# class, unwraps StringIO, and wraps file-like values in FilePart. Raw IO
# bodies mark the request as non-retryable since the stream can't be rewound.
#
# @param value [Object]
# @param state [Hash{Symbol=>Object}] supports :can_retry
#
# @return [Object]
def dump(value, state:)
  case value
  in Array
    value.map { OpenAI::Internal::Type::Unknown.dump(_1, state: state) }
  in Hash
    value.transform_values { OpenAI::Internal::Type::Unknown.dump(_1, state: state) }
  in OpenAI::Internal::Type::BaseModel
    value.class.dump(value, state: state)
  in StringIO
    value.string
  in Pathname | IO
    state[:can_retry] = false if value.is_a?(IO)
    OpenAI::FilePart.new(value)
  in OpenAI::FilePart
    state[:can_retry] = false if value.content.is_a?(IO)
    value
  else
    # Scalars and anything unrecognized pass through unchanged.
    value
  end
end
-
-
# @api private
-
#
-
# @param depth [Integer]
-
#
-
# @return [String]
-
1
def inspect(depth: 0)
-
super()
-
end
-
-
# rubocop:enable Lint/UnusedMethodArgument
-
-
1
class << self
-
# @api private
-
#
-
# @param spec [Hash{Symbol=>Object}, Proc, OpenAI::Internal::Type::Converter, Class] .
-
#
-
# @option spec [NilClass, TrueClass, FalseClass, Integer, Float, Symbol] :const
-
#
-
# @option spec [Proc] :enum
-
#
-
# @option spec [Proc] :union
-
#
-
# @option spec [Boolean] :"nil?"
-
#
-
# @return [Proc]
-
1
# @api private
#
# Normalizes any field type declaration into a zero-arg thunk returning the
# concrete converter/class. Thunks preserve lazy resolution for forward and
# mutually-recursive references.
#
# @param spec [Hash{Symbol=>Object}, Proc, OpenAI::Internal::Type::Converter, Class] supports :const, :enum, :union, :"nil?"
#
# @return [Proc]
def type_info(spec)
  case spec
  in Proc
    spec
  in Hash
    # Exactly one of :const / :enum / :union carries the type; recurse on it.
    type_info(spec.slice(:const, :enum, :union).first&.last)
  in true | false
    -> { OpenAI::Internal::Type::Boolean }
  in OpenAI::Internal::Type::Converter | Class | Symbol
    -> { spec }
  in NilClass | Integer | Float
    # A literal const implies its own class as the type.
    -> { spec.class }
  end
end
-
-
# @api private
-
#
-
# @param translate_names [Boolean]
-
#
-
# @return [Hash{Symbol=>Object}]
-
1
# @api private
#
# Builds a fresh coercion-state hash for one coercion pass. The exactness
# tallies are mutable and must never be shared between passes.
#
# @param translate_names [Boolean] whether to look up fields by wire api_name
#
# @return [Hash{Symbol=>Object}]
def new_coerce_state(translate_names: true)
  tallies = {yes: 0, no: 0, maybe: 0}
  {
    translate_names: translate_names,
    strictness: true,
    exactness: tallies,
    error: nil,
    branched: 0
  }
end
-
-
# @api private
#
# Based on `target`, transform `value` into `target`, to the extent possible:
#
# 1. if the given `value` conforms to `target` already, return the given `value`
# 2. if it's possible and safe to convert the given `value` to `target`, then the
#    converted value
# 3. otherwise, the given `value` unaltered
#
# The coercion process is subject to improvement between minor release versions.
# See https://docs.pydantic.dev/latest/concepts/unions/#smart-mode
#
# @param target [OpenAI::Internal::Type::Converter, Class]
#
# @param value [Object]
#
# @param state [Hash{Symbol=>Object}] The `strictness` is one of `true`, `false`. This informs the coercion strategy
#   when we have to decide between multiple possible conversion targets:
#
#   - `true`: the conversion must be exact, with minimum coercion.
#   - `false`: the conversion can be approximate, with some coercion.
#
#   The `exactness` is `Hash` with keys being one of `yes`, `no`, or `maybe`. For
#   any given conversion attempt, the exactness will be updated based on how closely
#   the value recursively matches the target type:
#
#   - `yes`: the value can be converted to the target type with minimum coercion.
#   - `maybe`: the value can be converted to the target type with some reasonable
#     coercion.
#   - `no`: the value cannot be converted to the target type.
#
# @option state [Boolean] :translate_names
#
# @option state [Boolean] :strictness
#
# @option state [Hash{Symbol=>Object}] :exactness
#
# @option state [Class<StandardError>] :error
#
# @option state [Integer] :branched
#
# @return [Object]
def coerce(target, value, state: OpenAI::Internal::Type::Converter.new_coerce_state)
  # rubocop:disable Metrics/BlockNesting
  exactness = state.fetch(:exactness)

  case target
  in OpenAI::Internal::Type::Converter
    # Converter instances know how to coerce themselves; delegate entirely.
    return target.coerce(value, state: state)
  in Class
    # Already the right class: exact match, no work needed.
    if value.is_a?(target)
      exactness[:yes] += 1
      return value
    end

    case target
    in -> { _1 <= NilClass }
      # Coercing anything to nil is "maybe" unless it was already nil.
      exactness[value.nil? ? :yes : :maybe] += 1
      return nil
    in -> { _1 <= Integer }
      case value
      in Integer
        exactness[:yes] += 1
        return value
      else
        # `Kernel.then` gives a block scope so the inline `rescue` only
        # guards the conversion; failures are recorded, not raised.
        Kernel.then do
          return Integer(value).tap { exactness[:maybe] += 1 }
        rescue ArgumentError, TypeError => e
          state[:error] = e
        end
      end
    in -> { _1 <= Float }
      if value.is_a?(Numeric)
        # Any numeric can be widened to Float exactly.
        exactness[:yes] += 1
        return Float(value)
      else
        Kernel.then do
          return Float(value).tap { exactness[:maybe] += 1 }
        rescue ArgumentError, TypeError => e
          state[:error] = e
        end
      end
    in -> { _1 <= String }
      case value
      in String | Symbol | Numeric
        # Stringifying a number is lossier than stringifying a symbol.
        exactness[value.is_a?(Numeric) ? :maybe : :yes] += 1
        return value.to_s
      in StringIO
        exactness[:yes] += 1
        return value.string
      else
        state[:error] = TypeError.new("#{value.class} can't be coerced into #{String}")
      end
    in -> { _1 <= Date || _1 <= Time }
      Kernel.then do
        return target.parse(value).tap { exactness[:yes] += 1 }
      rescue ArgumentError, TypeError => e
        state[:error] = e
      end
    in -> { _1 <= StringIO } if value.is_a?(String)
      exactness[:yes] += 1
      # `.b` duplicates with binary encoding, suitable for byte payloads.
      return StringIO.new(value.b)
    else
    end
  in Symbol
    # A Symbol target is a constant (e.g. an enum member literal).
    case value
    in Symbol | String
      if value.to_sym == target
        exactness[:yes] += 1
        return target
      else
        exactness[:maybe] += 1
        return value
      end
    else
      message = "cannot convert non-matching #{value.class} into #{target.inspect}"
      state[:error] = ArgumentError.new(message)
    end
  else
  end

  # Fell through every branch: record a non-match and return unaltered.
  exactness[:no] += 1
  value
  # rubocop:enable Metrics/BlockNesting
end
-
-
# @api private
#
# Serializes `value` according to `target`; unrecognized targets fall back to
# the permissive `Unknown` dump.
#
# @param target [OpenAI::Internal::Type::Converter, Class]
#
# @param value [Object]
#
# @param state [Hash{Symbol=>Object}] .
#
# @option state [Boolean] :can_retry
#
# @return [Object]
def dump(target, value, state: {can_retry: true})
  # rubocop:disable Style/CaseEquality
  if OpenAI::Internal::Type::Converter === target
    target.dump(value, state: state)
  else
    OpenAI::Internal::Type::Unknown.dump(value, state: state)
  end
  # rubocop:enable Style/CaseEquality
end
-
-
# @api private
#
# Renders a human-readable description of `target`, recursing one level
# deeper into converter targets.
#
# @param target [Object]
# @param depth [Integer]
#
# @return [String]
def inspect(target, depth:)
  # rubocop:disable Style/CaseEquality
  if OpenAI::Internal::Type::Converter === target
    target.inspect(depth: depth.succ)
  else
    target.inspect
  end
  # rubocop:enable Style/CaseEquality
end
-
end
-
-
1
define_sorbet_constant!(:Input) do
-
T.type_alias { T.any(OpenAI::Internal::Type::Converter, T::Class[T.anything]) }
-
end
-
1
define_sorbet_constant!(:CoerceState) do
-
T.type_alias do
-
{
-
translate_names: T::Boolean,
-
strictness: T::Boolean,
-
exactness: {yes: Integer, no: Integer, maybe: Integer},
-
error: T::Class[StandardError],
-
branched: Integer
-
}
-
end
-
end
-
1
define_sorbet_constant!(:DumpState) do
-
T.type_alias { {can_retry: T::Boolean} }
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Internal
-
1
module Type
-
# @api private
-
#
-
# A value from among a specified list of options. OpenAPI enum values map to Ruby
-
# values in the SDK as follows:
-
#
-
# 1. boolean => true | false
-
# 2. integer => Integer
-
# 3. float => Float
-
# 4. string => Symbol
-
#
-
# We can therefore convert string values to Symbols, but can't convert other
-
# values safely.
-
#
-
# @example
-
# # `chat_model` is a `OpenAI::ChatModel`
-
# case chat_model
-
# when OpenAI::ChatModel::GPT_4_1
-
# # ...
-
# when OpenAI::ChatModel::GPT_4_1_MINI
-
# # ...
-
# when OpenAI::ChatModel::GPT_4_1_NANO
-
# # ...
-
# else
-
# puts(chat_model)
-
# end
-
#
-
# @example
-
# case chat_model
-
# in :"gpt-4.1"
-
# # ...
-
# in :"gpt-4.1-mini"
-
# # ...
-
# in :"gpt-4.1-nano"
-
# # ...
-
# else
-
# puts(chat_model)
-
# end
-
1
module Enum
  include OpenAI::Internal::Type::Converter
  include OpenAI::Internal::Util::SorbetRuntimeSupport

  # All of the valid Symbol values for this enum.
  #
  # @return [Array<NilClass, Boolean, Integer, Float, Symbol>]
  def values = constants.map { const_get(_1) }

  # @api public
  #
  # An object matches the enum exactly when it is one of its declared values.
  #
  # @param other [Object]
  #
  # @return [Boolean]
  def ===(other) = values.include?(other)

  # @api public
  #
  # Two enums compare equal when they expose the same set of values.
  #
  # @param other [Object]
  #
  # @return [Boolean]
  def ==(other)
    # rubocop:disable Style/CaseEquality
    OpenAI::Internal::Type::Enum === other && other.values.to_set == values.to_set
    # rubocop:enable Style/CaseEquality
  end

  # @api public
  #
  # @return [Integer]
  def hash = values.to_set.hash

  # @api private
  #
  # Unlike with primitives, `Enum` additionally validates that the value is a
  # member of the enum.
  #
  # @param value [String, Symbol, Object]
  #
  # @param state [Hash{Symbol=>Object}] see
  #   `OpenAI::Internal::Type::Converter.new_coerce_state` for the keys
  #   (`:translate_names`, `:strictness`, `:exactness`, `:error`, `:branched`).
  #
  # @return [Symbol, Object]
  def coerce(value, state:)
    exactness = state.fetch(:exactness)
    # Strings are normalized to symbols before membership lookup.
    candidate = value.is_a?(String) ? value.to_sym : value

    if values.include?(candidate)
      exactness[:yes] += 1
      candidate
    elsif values.first&.class == candidate.class
      # Right type but unknown member: pass the original through as "maybe".
      exactness[:maybe] += 1
      value
    else
      exactness[:no] += 1
      state[:error] = TypeError.new("#{value.class} can't be coerced into #{self}")
      value
    end
  end

  # @!method dump(value, state:)
  #   @api private
  #
  #   @param value [Symbol, Object]
  #
  #   @param state [Hash{Symbol=>Object}] .
  #
  #   @option state [Boolean] :can_retry
  #
  #   @return [Symbol, Object]

  # @api private
  #
  # @return [Object]
  def to_sorbet_type
    distinct = values.map { OpenAI::Internal::Util::SorbetRuntimeSupport.to_sorbet_type(_1) }.uniq
    if distinct.empty?
      T.noreturn
    elsif distinct.size == 1
      distinct.first
    else
      T.any(*distinct)
    end
  end

  # @api private
  #
  # @param depth [Integer]
  #
  # @return [String]
  def inspect(depth: 0)
    return is_a?(Module) ? super() : self.class.name if depth.positive?

    rendered = values.map { OpenAI::Internal::Type::Converter.inspect(_1, depth: depth.succ) }
    owner = is_a?(Module) ? name : self.class.name

    "#{owner}[#{rendered.join(' | ')}]"
  end
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Internal
-
1
module Type
-
# @api private
-
#
-
# @abstract
-
#
-
# Either `Pathname` or `StringIO`, or `IO`, or
-
# `OpenAI::Internal::Type::FileInput`.
-
#
-
# Note: when `IO` is used, all retries are disabled, since many IO` streams are
-
# not rewindable.
-
1
class FileInput
  extend OpenAI::Internal::Type::Converter

  private_class_method :new

  # @api public
  #
  # Matches any of the accepted file-ish inputs.
  #
  # @param other [Object]
  #
  # @return [Boolean]
  def self.===(other)
    other.is_a?(Pathname) || other.is_a?(StringIO) || other.is_a?(IO) ||
      other.is_a?(String) || other.is_a?(OpenAI::FilePart)
  end

  # @api public
  #
  # @param other [Object]
  #
  # @return [Boolean]
  def self.==(other) = other.is_a?(Class) && other <= OpenAI::Internal::Type::FileInput

  class << self
    # @api private
    #
    # Wraps raw strings into rewindable `StringIO`s; `StringIO` passes
    # through untouched, anything else records a type error.
    #
    # @param value [StringIO, String, Object]
    #
    # @param state [Hash{Symbol=>Object}] coercion state; see
    #   `OpenAI::Internal::Type::Converter.new_coerce_state`.
    #
    # @return [StringIO, Object]
    def coerce(value, state:)
      exactness = state.fetch(:exactness)
      case value
      when String
        exactness[:yes] += 1
        StringIO.new(value)
      when StringIO
        exactness[:yes] += 1
        value
      else
        state[:error] = TypeError.new("#{value.class} can't be coerced into #{StringIO}")
        exactness[:no] += 1
        value
      end
    end

    # @api private
    #
    # Disables request retries when the payload is (or wraps) a raw `IO`,
    # since such streams are generally not rewindable.
    #
    # @param value [Pathname, StringIO, IO, String, Object]
    #
    # @param state [Hash{Symbol=>Object}] .
    #
    # @option state [Boolean] :can_retry
    #
    # @return [Pathname, StringIO, IO, String, Object]
    def dump(value, state:)
      raw_stream =
        value.is_a?(IO) || (value.is_a?(OpenAI::FilePart) && value.content.is_a?(IO))
      state[:can_retry] = false if raw_stream

      value
    end

    # @api private
    #
    # @return [Object]
    def to_sorbet_type
      T.any(Pathname, StringIO, IO, String, OpenAI::FilePart)
    end
  end
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Internal
-
1
module Type
-
# @api private
-
#
-
# @abstract
-
#
-
# @generic Elem
-
#
-
# Hash of items of a given type.
-
1
class HashOf
  include OpenAI::Internal::Type::Converter
  include OpenAI::Internal::Util::SorbetRuntimeSupport

  private_class_method :new

  # @overload [](type_info, spec = {})
  #
  # Public constructor; e.g. `HashOf[Integer]` or `HashOf[-> { SomeModel }]`.
  #
  # @param type_info [Hash{Symbol=>Object}, Proc, OpenAI::Internal::Type::Converter, Class]
  #
  # @param spec [Hash{Symbol=>Object}] .
  #
  # @option spec [NilClass, TrueClass, FalseClass, Integer, Float, Symbol] :const
  #
  # @option spec [Proc] :enum
  #
  # @option spec [Proc] :union
  #
  # @option spec [Boolean] :"nil?"
  #
  # @return [self]
  def self.[](...) = new(...)

  # @api public
  #
  # A hash matches when every key is a Symbol/String and every value matches
  # the item type (the pinned `^type` pattern matches via `type === val`).
  #
  # @param other [Object]
  #
  # @return [Boolean]
  def ===(other)
    type = item_type
    case other
    in Hash
      other.all? do |key, val|
        case [key, val]
        in [Symbol | String, ^type]
          true
        else
          false
        end
      end
    else
      false
    end
  end

  # @api public
  #
  # @param other [Object]
  #
  # @return [Boolean]
  def ==(other)
    # rubocop:disable Layout/LineLength
    other.is_a?(OpenAI::Internal::Type::HashOf) && other.nilable? == nilable? && other.item_type == item_type
    # rubocop:enable Layout/LineLength
  end

  # @api public
  #
  # @return [Integer]
  def hash = [self.class, item_type].hash

  # @api private
  #
  # Coerces each entry of a hash: keys are symbolized, values are recursively
  # coerced to the item type. Non-hash input is rejected.
  #
  # @param value [Hash{Object=>Object}, Object]
  #
  # @param state [Hash{Symbol=>Object}] coercion state; see
  #   `OpenAI::Internal::Type::Converter.new_coerce_state` for the keys.
  #
  # @return [Hash{Symbol=>Object}, Object]
  def coerce(value, state:)
    exactness = state.fetch(:exactness)

    unless value.is_a?(Hash)
      exactness[:no] += 1
      state[:error] = TypeError.new("#{value.class} can't be coerced into #{Hash}")
      return value
    end

    target = item_type
    # One "yes" for the container itself; entries vote individually below.
    exactness[:yes] += 1
    value
      .to_h do |key, val|
        k = key.is_a?(String) ? key.to_sym : key
        v =
          case [nilable?, val]
          in [true, nil]
            # nil is an exact match when the item type is declared nilable.
            exactness[:yes] += 1
            nil
          else
            OpenAI::Internal::Type::Converter.coerce(target, val, state: state)
          end

        # Keys that could not be symbolized count against exactness.
        exactness[:no] += 1 unless k.is_a?(Symbol)
        [k, v]
      end
  end

  # @api private
  #
  # @param value [Hash{Object=>Object}, Object]
  #
  # @param state [Hash{Symbol=>Object}] .
  #
  # @option state [Boolean] :can_retry
  #
  # @return [Hash{Symbol=>Object}, Object]
  def dump(value, state:)
    target = item_type
    if value.is_a?(Hash)
      value.transform_values do
        OpenAI::Internal::Type::Converter.dump(target, _1, state: state)
      end
    else
      super
    end
  end

  # @api private
  #
  # NOTE(review): `T::Hash` normally takes both key and value type params —
  # confirm the single-argument usage here is intentional.
  #
  # @return [Object]
  def to_sorbet_type
    T::Hash[OpenAI::Internal::Util::SorbetRuntimeSupport.to_sorbet_type(item_type)]
  end

  # @api private
  #
  # Item type is stored as a thunk to allow forward references; resolved lazily.
  #
  # @return [generic<Elem>]
  protected def item_type = @item_type_fn.call

  # @api private
  #
  # @return [Boolean]
  protected def nilable? = @nilable

  # @api private
  #
  # @param type_info [Hash{Symbol=>Object}, Proc, OpenAI::Internal::Type::Converter, Class]
  #
  # @param spec [Hash{Symbol=>Object}] .
  #
  # @option spec [NilClass, TrueClass, FalseClass, Integer, Float, Symbol] :const
  #
  # @option spec [Proc] :enum
  #
  # @option spec [Proc] :union
  #
  # @option spec [Boolean] :"nil?"
  def initialize(type_info, spec = {})
    @item_type_fn = OpenAI::Internal::Type::Converter.type_info(type_info || spec)
    @nilable = spec.fetch(:nil?, false)
  end

  # @api private
  #
  # @param depth [Integer]
  #
  # @return [String]
  def inspect(depth: 0)
    items = OpenAI::Internal::Type::Converter.inspect(item_type, depth: depth.succ)

    "#{self.class}[#{[items, nilable? ? 'nil' : nil].compact.join(' | ')}]"
  end
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Internal
-
1
module Type
-
# @api private
-
1
module RequestParameters
  # @!attribute request_options
  #   Options to specify HTTP behaviour for this request.
  #
  #   @return [OpenAI::RequestOptions, Hash{Symbol=>Object}]

  # Include hook: installs the optional `request_options` field on any
  # including `BaseModel` subclass.
  #
  # @param mod [Module]
  def self.included(mod)
    unless mod <= OpenAI::Internal::Type::BaseModel
      raise ArgumentError.new(mod)
    end

    mod.optional(:request_options, OpenAI::RequestOptions)
  end

  # @api private
  module Converter
    # @api private
    #
    # Dumps `params` and splits the result into the request payload and the
    # per-request options, forcing `max_retries: 0` for non-retryable bodies.
    #
    # @param params [Object]
    #
    # @return [Array(Object, Hash{Symbol=>Object})]
    def dump_request(params)
      state = {can_retry: true}
      dumped = dump(params, state: state)
      return [dumped, nil] unless dumped.is_a?(Hash)

      options = OpenAI::Internal::Util.coerce_hash!(dumped[:request_options]).to_h
      options = {**options, max_retries: 0} unless state.fetch(:can_retry)
      [dumped.except(:request_options), options]
    end
  end
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Internal
-
1
module Type
-
# @api private
-
#
-
# @example
-
# # `chat_completion_content_part` is a `OpenAI::Chat::ChatCompletionContentPart`
-
# case chat_completion_content_part
-
# when OpenAI::Chat::ChatCompletionContentPartText
-
# puts(chat_completion_content_part.text)
-
# when OpenAI::Chat::ChatCompletionContentPartImage
-
# puts(chat_completion_content_part.image_url)
-
# when OpenAI::Chat::ChatCompletionContentPartInputAudio
-
# puts(chat_completion_content_part.input_audio)
-
# else
-
# puts(chat_completion_content_part)
-
# end
-
#
-
# @example
-
# case chat_completion_content_part
-
# in {type: :text, text: text}
-
# puts(text)
-
# in {type: :image_url, image_url: image_url}
-
# puts(image_url)
-
# in {type: :input_audio, input_audio: input_audio}
-
# puts(input_audio)
-
# else
-
# puts(chat_completion_content_part)
-
# end
-
1
module Union
  include OpenAI::Internal::Type::Converter
  include OpenAI::Internal::Util::SorbetRuntimeSupport

  # @api private
  #
  # All of the specified variant info for this union, as
  # `[discriminator_key_or_nil, thunk]` pairs (thunks allow forward refs).
  #
  # @return [Array<Array(Symbol, Proc)>]
  private def known_variants = (@known_variants ||= [])

  # @api private
  #
  # @return [Array<Array(Symbol, Object)>]
  protected def derefed_variants
    known_variants.map { |key, variant_fn| [key, variant_fn.call] }
  end

  # All of the specified variants for this union.
  #
  # @return [Array<Object>]
  def variants = derefed_variants.map(&:last)

  # @api private
  #
  # DSL: declares the property used to discriminate between variants.
  #
  # @param property [Symbol]
  private def discriminator(property)
    case property
    in Symbol
      @discriminator = property
    end
  end

  # @api private
  #
  # DSL: registers a variant, either keyed by a discriminator value
  # (`variant :foo, Type`) or unkeyed (`variant Type`).
  #
  # @param key [Symbol, Hash{Symbol=>Object}, Proc, OpenAI::Internal::Type::Converter, Class]
  #
  # @param spec [Hash{Symbol=>Object}, Proc, OpenAI::Internal::Type::Converter, Class] .
  #
  # @option spec [NilClass, TrueClass, FalseClass, Integer, Float, Symbol] :const
  #
  # @option spec [Proc] :enum
  #
  # @option spec [Proc] :union
  #
  # @option spec [Boolean] :"nil?"
  private def variant(key, spec = nil)
    variant_info =
      case key
      in Symbol
        [key, OpenAI::Internal::Type::Converter.type_info(spec)]
      in Proc | OpenAI::Internal::Type::Converter | Class | Hash
        [nil, OpenAI::Internal::Type::Converter.type_info(key)]
      end

    known_variants << variant_info
  end

  # @api private
  #
  # Cheaply picks a single variant for `value` when possible: models map to
  # their own class; hashes are looked up by the discriminator property
  # (symbol or string key). Returns nil when no fast resolution applies.
  #
  # @param value [Object]
  #
  # @return [OpenAI::Internal::Type::Converter, Class, nil]
  private def resolve_variant(value)
    case [@discriminator, value]
    in [_, OpenAI::Internal::Type::BaseModel]
      value.class
    in [Symbol, Hash]
      key = value.fetch(@discriminator) do
        value.fetch(@discriminator.to_s, OpenAI::Internal::OMIT)
      end

      return nil if key == OpenAI::Internal::OMIT

      key = key.to_sym if key.is_a?(String)
      known_variants.find { |k,| k == key }&.last&.call
    else
      nil
    end
  end

  # rubocop:disable Style/HashEachMethods
  # rubocop:disable Style/CaseEquality

  # @api public
  #
  # @param other [Object]
  #
  # @return [Boolean]
  def ===(other)
    known_variants.any? do |_, variant_fn|
      variant_fn.call === other
    end
  end

  # @api public
  #
  # @param other [Object]
  #
  # @return [Boolean]
  def ==(other)
    OpenAI::Internal::Type::Union === other && other.derefed_variants == derefed_variants
  end

  # @api public
  #
  # @return [Integer]
  def hash = variants.hash

  # @api private
  #
  # Tries to efficiently coerce the given value to one of the known variants.
  #
  # If the value cannot match any of the known variants, the coercion is considered
  # non-viable and returns the original value.
  #
  # Strategy: fast-path via `resolve_variant`; otherwise trial-coerce each
  # variant with its own exactness tally, returning the first exact match and
  # falling back to the best "maybe" candidate.
  #
  # NOTE(review): when the fast path returns early, `strictness` is still nil
  # and the `ensure` clause writes `state[:strictness] = nil` — confirm this
  # is intentional.
  #
  # @param value [Object]
  #
  # @param state [Hash{Symbol=>Object}] coercion state; see
  #   `OpenAI::Internal::Type::Converter.new_coerce_state` for the keys.
  #
  # @return [Object]
  def coerce(value, state:)
    if (target = resolve_variant(value))
      return OpenAI::Internal::Type::Converter.coerce(target, value, state: state)
    end

    strictness = state.fetch(:strictness)
    exactness = state.fetch(:exactness)

    alternatives = []
    known_variants.each do |_, variant_fn|
      target = variant_fn.call
      # Each variant gets a private tally so candidates can be compared.
      exact = state[:exactness] = {yes: 0, no: 0, maybe: 0}
      state[:branched] += 1

      coerced = OpenAI::Internal::Type::Converter.coerce(target, value, state: state)
      yes, no, maybe = exact.values
      if (no + maybe).zero? || (!strictness && yes.positive?)
        # Perfect (or good-enough under lax strictness) match: commit it.
        exact.each { exactness[_1] += _2 }
        state[:exactness] = exactness
        return coerced
      elsif maybe.positive?
        # Viable but inexact; rank by (-yes, -maybe, no), i.e. most-exact first.
        alternatives << [[-yes, -maybe, no], exact, coerced]
      end
    end

    case alternatives.sort_by!(&:first)
    in []
      exactness[:no] += 1
      state[:error] = ArgumentError.new("no matching variant for #{value.inspect}")
      value
    in [[_, exact, coerced], *]
      exact.each { exactness[_1] += _2 }
      coerced
    end
      .tap { state[:exactness] = exactness }
  ensure
    state[:strictness] = strictness
  end

  # @api private
  #
  # @param value [Object]
  #
  # @param state [Hash{Symbol=>Object}] .
  #
  # @option state [Boolean] :can_retry
  #
  # @return [Object]
  def dump(value, state:)
    if (target = resolve_variant(value))
      return OpenAI::Internal::Type::Converter.dump(target, value, state: state)
    end

    known_variants.each do
      target = _2.call
      return OpenAI::Internal::Type::Converter.dump(target, value, state: state) if target === value
    end

    super
  end

  # @api private
  #
  # @return [Object]
  def to_sorbet_type
    types = variants.map { OpenAI::Internal::Util::SorbetRuntimeSupport.to_sorbet_type(_1) }.uniq
    case types
    in []
      T.noreturn
    in [type]
      type
    else
      T.any(*types)
    end
  end

  # rubocop:enable Style/CaseEquality
  # rubocop:enable Style/HashEachMethods

  # @api private
  #
  # @param depth [Integer]
  #
  # @return [String]
  def inspect(depth: 0)
    if depth.positive?
      return is_a?(Module) ? super() : self.class.name
    end

    members = variants.map { OpenAI::Internal::Type::Converter.inspect(_1, depth: depth.succ) }
    prefix = is_a?(Module) ? name : self.class.name

    "#{prefix}[#{members.join(' | ')}]"
  end
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Internal
-
1
module Type
-
# @api private
-
#
-
# @abstract
-
#
-
# When we don't know what to expect for the value.
-
1
class Unknown
  extend OpenAI::Internal::Type::Converter
  extend OpenAI::Internal::Util::SorbetRuntimeSupport

  # rubocop:disable Lint/UnusedMethodArgument

  private_class_method :new

  # @api public
  #
  # `Unknown` matches every object.
  #
  # @param other [Object]
  #
  # @return [Boolean]
  def self.===(other) = true

  # @api public
  #
  # @param other [Object]
  #
  # @return [Boolean]
  def self.==(other) = other.is_a?(Class) && other <= OpenAI::Internal::Type::Unknown

  class << self
    # @api private
    #
    # No coercion needed for Unknown type: any value counts as an exact match
    # and is passed through unchanged.
    #
    # @param value [Object]
    #
    # @param state [Hash{Symbol=>Object}] coercion state; see
    #   `OpenAI::Internal::Type::Converter.new_coerce_state` for the keys.
    #
    # @return [Object]
    def coerce(value, state:)
      tally = state.fetch(:exactness)
      tally[:yes] += 1
      value
    end

    # @!method dump(value, state:)
    #   @api private
    #
    #   @param value [Object]
    #
    #   @param state [Hash{Symbol=>Object}] .
    #
    #   @option state [Boolean] :can_retry
    #
    #   @return [Object]

    # @api private
    #
    # @return [Object]
    def to_sorbet_type = T.anything
  end

  # rubocop:enable Lint/UnusedMethodArgument
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Internal
-
# @api private
-
1
module Util
-
# @api private
#
# Monotonic clock reading, for measuring elapsed durations (immune to
# wall-clock adjustments).
#
# @return [Float]
def self.monotonic_secs
  Process.clock_gettime(Process::CLOCK_MONOTONIC)
end
-
-
# @api private
#
# Lazily enumerates `ns` and every module/class nested beneath it,
# children-first, ending with `ns` itself.
#
# @param ns [Module, Class]
#
# @return [Enumerable<Module, Class>]
def self.walk_namespaces(ns)
  nested = ns.constants(false).lazy.flat_map do |const_name|
    resolved = ns.const_get(const_name, false)
    # Class < Module, so this covers both namespaces and classes.
    resolved.is_a?(Module) ? walk_namespaces(resolved) : []
  end
  nested.chain([ns])
end
-
-
1
class << self
  # @api private
  #
  # Normalized CPU-architecture label (used for telemetry headers).
  #
  # @return [String]
  def arch
    raw = RbConfig::CONFIG["arch"]&.downcase
    case raw
    when nil then "unknown"
    when /aarch64|arm64/ then "arm64"
    when /x86_64/ then "x64"
    when /arm/ then "arm"
    else "other:#{raw}"
    end
  end

  # @api private
  #
  # Normalized operating-system label (used for telemetry headers).
  #
  # @return [String]
  def os
    host = RbConfig::CONFIG["host_os"]&.downcase
    case host
    when nil then "Unknown"
    when /linux/ then "Linux"
    when /darwin/ then "MacOS"
    when /freebsd/ then "FreeBSD"
    when /openbsd/ then "OpenBSD"
    when /mswin|mingw|cygwin|ucrt/ then "Windows"
    else "Other:#{host}"
    end
  end
end
-
-
1
class << self
  # @api private
  #
  # Whether `input` is a JSON-style primitive scalar.
  #
  # @param input [Object]
  #
  # @return [Boolean]
  def primitive?(input)
    case input
    in true | false | Numeric | Symbol | String
      true
    else
      false
    end
  end

  # @api private
  #
  # Best-effort conversion of `"true"`/`"false"` (case-insensitive) into
  # booleans; any other input is returned unchanged.
  #
  # @param input [String, Boolean]
  #
  # @return [Boolean, Object]
  def coerce_boolean(input)
    case input.is_a?(String) ? input.downcase : input
    in "true"
      true
    in "false"
      false
    else
      input
    end
  end

  # @api private
  #
  # Strict variant of `coerce_boolean`: passes through `true`/`false`/`nil`
  # and raises on anything else.
  #
  # @param input [String, Boolean]
  #
  # @raise [ArgumentError]
  # @return [Boolean, nil]
  def coerce_boolean!(input)
    case coerce_boolean(input)
    in true | false | nil => coerced
      coerced
    else
      raise ArgumentError.new("Unable to coerce #{input.inspect} into boolean value")
    end
  end

  # @api private
  #
  # Best-effort integer conversion; non-numeric input is returned unchanged.
  #
  # @param input [String, Integer]
  #
  # @return [Integer, Object]
  def coerce_integer(input)
    Integer(input, exception: false) || input
  end

  # @api private
  #
  # Best-effort float conversion; non-numeric input is returned unchanged.
  #
  # @param input [String, Integer, Float]
  #
  # @return [Float, Object]
  def coerce_float(input)
    Float(input, exception: false) || input
  end

  # @api private
  #
  # Converts hash-like objects via `#to_h`; `nil` and list/stream types are
  # deliberately passed through untouched.
  #
  # @param input [Object]
  #
  # @return [Hash{Object=>Object}, Object]
  def coerce_hash(input)
    case input
    in NilClass | Array | Set | Enumerator | StringIO | IO
      input
    else
      input.respond_to?(:to_h) ? input.to_h : input
    end
  end

  # @api private
  #
  # Strict variant of `coerce_hash`: passes through `Hash`/`nil` and raises
  # on anything else.
  #
  # @param input [Object]
  #
  # @raise [ArgumentError]
  # @return [Hash{Object=>Object}, nil]
  def coerce_hash!(input)
    case coerce_hash(input)
    in Hash | nil => coerced
      coerced
    else
      # BUG FIX: this message previously interpolated `data.inspect`, but no
      # `data` local exists in this method — reaching this branch raised
      # `NameError` instead of the intended `ArgumentError`.
      message = "Expected a #{Hash} or #{OpenAI::Internal::Type::BaseModel}, got #{input.inspect}"
      raise ArgumentError.new(message)
    end
  end
end
-
-
1
class << self
  # @api private
  #
  # Merges `rhs` into `lhs` one level at a time: hashes recurse, arrays
  # concatenate when `concat` is set, anything else is replaced by `rhs`.
  #
  # @param lhs [Object]
  # @param rhs [Object]
  # @param concat [Boolean]
  #
  # @return [Object]
  private def deep_merge_lr(lhs, rhs, concat: false)
    case [lhs, rhs, concat]
    in [Hash, Hash, _]
      # merge block args are (key, old, new); recurse on the colliding values.
      lhs.merge(rhs) { deep_merge_lr(_2, _3, concat: concat) }
    in [Array, Array, true]
      lhs.concat(rhs)
    else
      rhs
    end
  end

  # @api private
  #
  # Recursively merge one hash with another. If the values at a given key are not
  # both hashes, just take the new value.
  #
  # @param values [Array<Object>]
  #
  # @param sentinel [Object, nil] the value to return if no values are provided.
  #
  # @param concat [Boolean] whether to merge sequences by concatenation.
  #
  # @return [Object]
  def deep_merge(*values, sentinel: nil, concat: false)
    case values
    in [value, *values]
      values.reduce(value) do |acc, val|
        deep_merge_lr(acc, val, concat: concat)
      end
    else
      sentinel
    end
  end

  # @api private
  #
  # Fetches a (possibly nested) member of `data`. `pick` may be a single
  # key/index, a path of keys/indices, or a Proc applied to `data`; `blk`
  # supplies the fallback when the path does not resolve.
  #
  # @param data [Hash{Symbol=>Object}, Array<Object>, Object]
  # @param pick [Symbol, Integer, Array<Symbol, Integer>, Proc, nil]
  # @param blk [Proc, nil]
  #
  # @return [Object, nil]
  def dig(data, pick, &blk)
    case [data, pick]
    in [_, nil]
      data
    in [Hash, Symbol] | [Array, Integer]
      data.fetch(pick) { blk&.call }
    in [Hash | Array, Array]
      pick.reduce(data) do |acc, key|
        case acc
        in Hash if acc.key?(key)
          acc.fetch(key)
        in Array if key.is_a?(Integer) && key < acc.length
          acc[key]
        else
          # Non-local return: abandon the whole walk on the first miss.
          return blk&.call
        end
      end
    in [_, Proc]
      pick.call(data)
    else
      blk&.call
    end
  end
end
-
1
class << self
  # @api private
  #
  # Renders the origin (scheme + host, plus port only when non-default).
  #
  # @param uri [URI::Generic]
  #
  # @return [String]
  def uri_origin(uri)
    port_part = uri.port == uri.default_port ? "" : ":#{uri.port}"
    "#{uri.scheme}://#{uri.host}#{port_part}"
  end

  # @api private
  #
  # Renders a path template: a plain string passes through, while
  # `[template, *args]` URL-encodes each argument and applies `format`.
  #
  # @param path [String, Array<String>]
  #
  # @return [String]
  def interpolate_path(path)
    case path
    in String
      path
    in []
      ""
    in [String => template, *args]
      format(template, *args.map { ERB::Util.url_encode(_1) })
    end
  end
end
-
-
1
class << self
  # @api private
  #
  # Parses a query string into `name => [values]`; `nil` parses as empty.
  #
  # @param query [String, nil]
  #
  # @return [Hash{String=>Array<String>}]
  def decode_query(query) = CGI.parse(query.to_s)

  # @api private
  #
  # Inverse of `decode_query`; empty input encodes to `nil` rather than `""`.
  #
  # @param query [Hash{String=>Array<String>, String, nil}, nil]
  #
  # @return [String, nil]
  def encode_query(query)
    pairs = query.to_h
    return nil if pairs.empty?

    URI.encode_www_form(pairs)
  end
end
-
-
1
class << self
  # @api private
  #
  # Splits `url` into its URI components, with the query string pre-parsed
  # into a `Hash{String=>Array<String>}`.
  #
  # @param url [URI::Generic, String]
  #
  # @return [Hash{Symbol=>String, Integer, nil}]
  def parse_uri(url)
    parsed = URI::Generic.component.zip(URI.split(url)).to_h
    {**parsed, query: decode_query(parsed.fetch(:query))}
  end

  # @api private
  #
  # Inverse of `parse_uri`.
  #
  # @param parsed [Hash{Symbol=>String, Integer, nil}] components as
  #   returned by `parse_uri` (`:scheme`, `:host`, `:port`, `:path`,
  #   `:query`, ...).
  #
  # @return [URI::Generic]
  def unparse_uri(parsed)
    URI::Generic.build(**parsed, query: encode_query(parsed.fetch(:query)))
  end

  # @api private
  #
  # Joins a parsed base URI (`lhs`) with a parsed override (`rhs`):
  # the override's path resolves relative to the base path, its
  # scheme/host/port win when present, and query parameters are merged
  # (repeated keys concatenate).
  #
  # @param lhs [Hash{Symbol=>String, Integer, nil}] parsed base components
  #   (see `parse_uri`).
  #
  # @param rhs [Hash{Symbol=>String, Integer, nil}] parsed override
  #   components (see `parse_uri`).
  #
  # @return [URI::Generic]
  def join_parsed_uri(lhs, rhs)
    base_path, base_query = lhs.fetch_values(:path, :query)
    # Treat the base path as a directory so relative joins append to it
    # instead of replacing its last segment.
    slashed = base_path.end_with?("/") ? base_path : "#{base_path}/"

    parsed_path, parsed_query = parse_uri(rhs.fetch(:path)).fetch_values(:path, :query)
    override = URI::Generic.build(**rhs.slice(:scheme, :host, :port), path: parsed_path)

    joined = URI.join(URI::Generic.build(lhs.except(:path, :query)), slashed, override)
    # The base query only survives when the join left the base path intact
    # (i.e. the override did not navigate away from it).
    query = deep_merge(
      joined.path == base_path ? base_query : {},
      parsed_query,
      rhs[:query].to_h,
      concat: true
    )

    joined.query = encode_query(query)
    joined
  end
end
-
-
1
class << self
  # @api private
  #
  # Merges header hashes into one, lower-casing names and stripping values.
  # Array values join with ", " (nil entries dropped); a nil value is kept
  # as nil.
  #
  # @param headers [Hash{String=>String, Integer, Array<String, Integer, nil>, nil}]
  #
  # @return [Hash{String=>String}]
  def normalized_headers(*headers)
    {}.merge(*headers.compact).to_h do |name, raw|
      normalized =
        if raw.is_a?(Array)
          raw.filter_map { _1&.to_s&.strip }.join(", ")
        else
          raw&.to_s&.strip
        end
      [name.downcase, normalized]
    end
  end
end
-
-
# @api private
#
# An adapter that satisfies the IO interface required by `::IO.copy_stream`,
# wrapping a String, Pathname, IO/StringIO, or Enumerator source.
class ReadIOAdapter
  # @api private
  #
  # Whether this adapter opened (and therefore owns) the underlying stream.
  #
  # @return [Boolean, nil]
  def close? = @closing

  # @api private
  #
  # Releases the underlying stream: enumerators are closed via
  # `close_fused!`; IO handles are closed only when this adapter opened them.
  def close
    case @stream
    in Enumerator
      OpenAI::Internal::Util.close_fused!(@stream)
    in IO if close?
      @stream.close
    else
    end
  end

  # @api private
  #
  # Drains chunks from the wrapped enumerator into `@buf` until `max_len`
  # characters are available (or the stream ends), then slices the read off
  # the front. On exhaustion the stream is dropped and the buffer flushed.
  #
  # NOTE(review): `@buf.slice!(..max_len)` removes indices 0..max_len
  # inclusive, i.e. max_len + 1 characters — confirm the intended length.
  #
  # @param max_len [Integer, nil]
  #
  # @return [String]
  private def read_enum(max_len)
    case max_len
    in nil
      @stream.to_a.join
    in Integer
      @buf << @stream.next while @buf.length < max_len
      @buf.slice!(..max_len)
    end
  rescue StopIteration
    @stream = nil
    @buf.slice!(0..)
  end

  # @api private
  #
  # IO#read-compatible entry point; each chunk read is also passed to the
  # progress callback given at construction.
  #
  # @param max_len [Integer, nil]
  # @param out_string [String, nil]
  #
  # @return [String, nil]
  def read(max_len = nil, out_string = nil)
    case @stream
    in nil
      nil
    in IO | StringIO
      @stream.read(max_len, out_string)
    in Enumerator
      read = read_enum(max_len)
      case out_string
      in String
        out_string.replace(read)
      in nil
        read
      end
    end
      .tap(&@blk)
  end

  # @api private
  #
  # @param src [String, Pathname, StringIO, Enumerable<String>]
  # @param blk [Proc] invoked with every chunk read (progress/observation)
  #
  # @yieldparam [String]
  def initialize(src, &blk)
    @stream =
      case src
      in String
        StringIO.new(src)
      in Pathname
        # We opened the file ourselves, so `close` must close it.
        @closing = true
        src.open(binmode: true)
      else
        src
      end
    @buf = String.new
    @blk = blk
  end
end
-
-
1
class << self
  # @api private
  #
  # Builds an Enumerator whose yielder also responds to IO-style `#write`:
  # each written chunk is yielded downstream and its bytesize returned, so
  # the yielder can be handed to APIs expecting a writable object.
  #
  # @param blk [Proc]
  #
  # @yieldparam [Enumerator::Yielder]
  # @return [Enumerable<String>]
  def writable_enum(&blk)
    Enumerator.new do |yielder|
      scratch = String.new
      yielder.define_singleton_method(:write) do |chunk|
        self << scratch.replace(chunk)
        scratch.bytesize
      end
      blk.call(yielder)
    end
  end
end
-
-
# @type [Regexp]
-
1
JSON_CONTENT = %r{^application/(?:vnd(?:\.[^.]+)*\+)?json(?!l)}
-
# @type [Regexp]
-
1
JSONL_CONTENT = %r{^application/(:?x-(?:n|l)djson)|(:?(?:x-)?jsonl)}
-
-
1
class << self
  # @api private
  #
  # Writes one multipart part body (Content-Type header line + payload) to the
  # yielder, choosing the payload encoding from the value's type.
  #
  # @param y [Enumerator::Yielder]
  # @param val [Object]
  # @param closing [Array<Proc>] collects close callbacks for IOs opened here
  # @param content_type [String, nil]
  private def write_multipart_content(y, val:, closing:, content_type: nil)
    content_line = "Content-Type: %s\r\n\r\n"

    case val
    in OpenAI::FilePart
      # Unwrap and recurse with the part's own content type.
      return write_multipart_content(
        y,
        val: val.content,
        closing: closing,
        content_type: val.content_type
      )
    in Pathname
      y << format(content_line, content_type || "application/octet-stream")
      io = val.open(binmode: true)
      closing << io.method(:close)
      IO.copy_stream(io, y)
    in IO
      y << format(content_line, content_type || "application/octet-stream")
      IO.copy_stream(val, y)
    in StringIO
      y << format(content_line, content_type || "application/octet-stream")
      y << val.string
    in -> { primitive?(_1) }
      y << format(content_line, content_type || "text/plain")
      y << val.to_s
    else
      y << format(content_line, content_type || "application/json")
      y << JSON.generate(val)
    end
    y << "\r\n"
  end

  # @api private
  #
  # Writes one complete multipart chunk: boundary, Content-Disposition
  # (with name/filename where available), then the part content.
  #
  # @param y [Enumerator::Yielder]
  # @param boundary [String]
  # @param key [Symbol, String, nil]
  # @param val [Object]
  # @param closing [Array<Proc>]
  private def write_multipart_chunk(y, boundary:, key:, val:, closing:)
    y << "--#{boundary}\r\n"
    y << "Content-Disposition: form-data"

    unless key.nil?
      name = ERB::Util.url_encode(key.to_s)
      y << "; name=\"#{name}\""
    end

    case val
    in OpenAI::FilePart unless val.filename.nil?
      filename = ERB::Util.url_encode(val.filename)
      # Fixed: the computed `filename` was never interpolated — the header
      # previously emitted a literal `#(unknown)` placeholder.
      y << "; filename=\"#{filename}\""
    in Pathname | IO
      filename = ERB::Util.url_encode(::File.basename(val.to_path))
      # Fixed: same missing interpolation as above.
      y << "; filename=\"#{filename}\""
    else
    end
    y << "\r\n"

    write_multipart_content(y, val: val, closing: closing)
  end

  # @api private
  #
  # https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.1.1.md#special-considerations-for-multipart-content
  #
  # Streams a multipart/form-data encoding of `body`; any IOs opened along the
  # way are closed when the returned fused enumerator is closed.
  #
  # @param body [Object]
  #
  # @return [Array(String, Enumerable<String>)] boundary and the body stream
  private def encode_multipart_streaming(body)
    boundary = SecureRandom.urlsafe_base64(60)

    closing = []
    strio = writable_enum do |y|
      case body
      in Hash
        body.each do |key, val|
          case val
          in Array if val.all? { primitive?(_1) }
            # Arrays of primitives become repeated parts under the same name.
            val.each do |v|
              write_multipart_chunk(y, boundary: boundary, key: key, val: v, closing: closing)
            end
          else
            write_multipart_chunk(y, boundary: boundary, key: key, val: val, closing: closing)
          end
        end
      else
        write_multipart_chunk(y, boundary: boundary, key: nil, val: body, closing: closing)
      end
      y << "--#{boundary}--\r\n"
    end

    fused_io = fused_enum(strio) { closing.each(&:call) }
    [boundary, fused_io]
  end

  # @api private
  #
  # Serializes `body` according to the request's content type, returning
  # possibly-updated headers and the encoded payload.
  #
  # @param headers [Hash{String=>String}]
  # @param body [Object]
  #
  # @return [Object]
  def encode_content(headers, body)
    # rubocop:disable Style/CaseEquality
    # rubocop:disable Layout/LineLength
    content_type = headers["content-type"]
    case [content_type, body]
    in [OpenAI::Internal::Util::JSON_CONTENT, Hash | Array | -> { primitive?(_1) }]
      [headers, JSON.generate(body)]
    in [OpenAI::Internal::Util::JSONL_CONTENT, Enumerable] unless OpenAI::Internal::Type::FileInput === body
      [headers, body.lazy.map { JSON.generate(_1) }]
    in [%r{^multipart/form-data}, Hash | OpenAI::Internal::Type::FileInput]
      boundary, strio = encode_multipart_streaming(body)
      headers = {**headers, "content-type" => "#{content_type}; boundary=#{boundary}"}
      [headers, strio]
    in [_, Symbol | Numeric]
      [headers, body.to_s]
    in [_, StringIO]
      [headers, body.string]
    in [_, OpenAI::FilePart]
      [headers, body.content]
    else
      [headers, body]
    end
    # rubocop:enable Layout/LineLength
    # rubocop:enable Style/CaseEquality
  end

  # @api private
  #
  # https://www.iana.org/assignments/character-sets/character-sets.xhtml
  #
  # Tags `text` with the charset declared in `content_type`, if any; unknown
  # charsets are ignored rather than raising.
  #
  # @param content_type [String]
  # @param text [String]
  def force_charset!(content_type, text:)
    charset = /charset=([^;\s]+)/.match(content_type)&.captures&.first

    return unless charset

    begin
      encoding = Encoding.find(charset)
      text.force_encoding(encoding)
    rescue ArgumentError
      nil
    end
  end

  # @api private
  #
  # Assumes each chunk in stream has `Encoding::BINARY`.
  #
  # Decodes a response body stream according to its content type: JSON is
  # parsed eagerly, JSONL and SSE lazily, anything else returned as a StringIO.
  #
  # @param headers [Hash{String=>String}, Net::HTTPHeader]
  # @param stream [Enumerable<String>]
  # @param suppress_error [Boolean] return the raw string on parse failure
  #
  # @raise [JSON::ParserError]
  # @return [Object]
  def decode_content(headers, stream:, suppress_error: false)
    case (content_type = headers["content-type"])
    in OpenAI::Internal::Util::JSON_CONTENT
      json = stream.to_a.join
      begin
        JSON.parse(json, symbolize_names: true)
      rescue JSON::ParserError => e
        raise e unless suppress_error
        json
      end
    in OpenAI::Internal::Util::JSONL_CONTENT
      lines = decode_lines(stream)
      chain_fused(lines) do |y|
        lines.each { y << JSON.parse(_1, symbolize_names: true) }
      end
    in %r{^text/event-stream}
      lines = decode_lines(stream)
      decode_sse(lines)
    else
      text = stream.to_a.join
      force_charset!(content_type, text: text)
      StringIO.new(text)
    end
  end
end
-
-
1
class << self
  # @api private
  #
  # Wraps `enum` so it can be iterated at most once ("fused"); the `close`
  # callback fires exactly once — when iteration finishes or is abandoned.
  #
  # https://doc.rust-lang.org/std/iter/trait.FusedIterator.html
  #
  # @param enum [Enumerable<Object>]
  # @param external [Boolean] drive `enum` via external iteration (`#next`)
  # @param close [Proc]
  #
  # @return [Enumerable<Object>]
  def fused_enum(enum, external: false, &close)
    spent = false
    wrapped = Enumerator.new do |y|
      unless spent
        spent = true
        if external
          loop { y << enum.next }
        else
          enum.each(&y)
        end
      end
    ensure
      # Fire the close hook exactly once, even across repeated iterations.
      close&.call
      close = nil
    end

    # `rewind` marks the stream as consumed instead of restarting it.
    wrapped.define_singleton_method(:rewind) do
      spent = true
      self
    end
    wrapped
  end

  # @api private
  #
  # Runs a fused enumerator's close hook without yielding any items;
  # non-Enumerator inputs are ignored.
  #
  # @param enum [Enumerable<Object>, nil]
  def close_fused!(enum)
    return unless enum.is_a?(Enumerator)

    # Rewinding fuses the stream, so the empty iteration below only runs
    # the `ensure` (close) path.
    # rubocop:disable Lint/UnreachableLoop
    enum.rewind.each { break }
    # rubocop:enable Lint/UnreachableLoop
  end

  # @api private
  #
  # Chains a generator block onto `enum`: consuming the result runs `blk`,
  # and closing the result closes the upstream `enum`.
  #
  # @param enum [Enumerable<Object>, nil]
  # @param blk [Proc]
  #
  # @yieldparam [Enumerator::Yielder]
  # @return [Enumerable<Object>]
  def chain_fused(enum, &blk)
    generator = Enumerator.new { |y| blk.call(y) }
    fused_enum(generator) { close_fused!(enum) }
  end
end
-
-
1
class << self
  # @api private
  #
  # Assumes Strings have been forced into having `Encoding::BINARY`.
  #
  # This decoder is responsible for reassembling lines split across multiple
  # fragments.
  #
  # @param enum [Enumerable<String>]
  #
  # @return [Enumerable<String>] complete lines, terminators included
  def decode_lines(enum)
    re = /(\r\n|\r|\n)/
    buffer = String.new
    cr_seen = nil

    chain_fused(enum) do |y|
      enum.each do |row|
        offset = buffer.bytesize
        buffer << row
        # Scan from the pending-CR position if any, else from the start of
        # the newly appended fragment.
        while (match = re.match(buffer, cr_seen&.to_i || offset))
          case [match.captures.first, cr_seen]
          in ["\r", nil]
            # A bare CR at the scan point may be the first half of a CRLF
            # split across fragments — defer the decision until more data.
            cr_seen = match.end(1)
            next
          in ["\r" | "\r\n", Integer]
            # The deferred CR turned out to be a real terminator.
            y << buffer.slice!(..(cr_seen.pred))
          else
            y << buffer.slice!(..(match.end(1).pred))
          end
          offset = 0
          cr_seen = nil
        end
      end

      # Flush: a trailing deferred CR is a terminator; any remainder is an
      # unterminated final line.
      y << buffer.slice!(..(cr_seen.pred)) unless cr_seen.nil?
      y << buffer unless buffer.empty?
    end
  end

  # @api private
  #
  # https://html.spec.whatwg.org/multipage/server-sent-events.html#parsing-an-event-stream
  #
  # Assumes that `lines` has been decoded with `#decode_lines`.
  #
  # @param lines [Enumerable<String>]
  #
  # @return [Enumerable<Hash{Symbol=>Object}>]
  def decode_sse(lines)
    # rubocop:disable Metrics/BlockLength
    chain_fused(lines) do |y|
      blank = {event: nil, data: nil, id: nil, retry: nil}
      current = {}

      lines.each do |line|
        case line.sub(/\R$/, "")
        in ""
          # Blank line dispatches the accumulated event (if any).
          next if current.empty?
          y << {**blank, **current}
          current = {}
        in /^:/
          # Comment line per the SSE spec — ignored.
          next
        in /^([^:]+):\s?(.*)$/
          field, value = Regexp.last_match.captures
          case field
          in "event"
            current.merge!(event: value)
          in "data"
            # Multiple data lines accumulate, newline-separated.
            (current[:data] ||= String.new) << (value << "\n")
          in "id" unless value.include?("\0")
            current.merge!(id: value)
          in "retry" if /^\d+$/ =~ value
            current.merge!(retry: Integer(value))
          else
          end
        else
        end
      end
      # rubocop:enable Metrics/BlockLength

      # Dispatch a final event that was not followed by a blank line.
      y << {**blank, **current} unless current.empty?
    end
  end
end
-
-
# @api private
#
# Lets a class or module lazily register Sorbet type-alias constants that are
# only materialized when `sorbet-runtime` is actually loaded.
module SorbetRuntimeSupport
  # Raised when a registered Sorbet constant is accessed without
  # `sorbet-runtime` being available.
  class MissingSorbetRuntimeError < ::RuntimeError
  end

  # @api private
  #
  # Per-extender registry of constant name => thunk producing its value.
  #
  # @return [Hash{Symbol=>Object}]
  private def sorbet_runtime_constants
    @sorbet_runtime_constants ||= {}
  end

  # @api private
  #
  # Resolves registered constants on demand; unregistered names fall through
  # to the default lookup.
  #
  # @param name [Symbol]
  def const_missing(name)
    registry = sorbet_runtime_constants
    super unless registry.key?(name)

    unless Object.const_defined?(:T)
      raise MissingSorbetRuntimeError,
            "Trying to access a Sorbet constant #{name.inspect} without `sorbet-runtime`."
    end

    registry.fetch(name).call
  end

  # @api private
  #
  # @param name [Symbol]
  #
  # @return [Boolean]
  def sorbet_constant_defined?(name)
    sorbet_runtime_constants.key?(name)
  end

  # @api private
  #
  # Registers `blk` as the lazy definition for constant `name`.
  #
  # @param name [Symbol]
  # @param blk [Proc]
  def define_sorbet_constant!(name, &blk)
    sorbet_runtime_constants.store(name, blk)
  end

  # @api private
  #
  # @return [Object]
  def to_sorbet_type
    raise NotImplementedError
  end

  class << self
    # @api private
    #
    # Maps an arbitrary value to the Sorbet type that describes it.
    #
    # @param type [OpenAI::Internal::Util::SorbetRuntimeSupport, Object]
    #
    # @return [Object]
    def to_sorbet_type(type)
      case type
      when OpenAI::Internal::Util::SorbetRuntimeSupport
        type.to_sorbet_type
      when Class, Module
        type
      when true, false
        T::Boolean
      else
        type.class
      end
    end
  end
end
-
-
1
# Make `OpenAI::Internal::Util` itself a lazy registry of Sorbet type aliases.
extend OpenAI::Internal::Util::SorbetRuntimeSupport

# Hash of URI components (scheme/host/port/path plus a multi-valued query map)
# used by this module's URI helpers.
define_sorbet_constant!(:ParsedUri) do
  T.type_alias do
    {
      scheme: T.nilable(String),
      host: T.nilable(String),
      port: T.nilable(Integer),
      path: T.nilable(String),
      query: T::Hash[String, T::Array[String]]
    }
  end
end

# Shape of one event hash produced by `Util.decode_sse`.
define_sorbet_constant!(:ServerSentEvent) do
  T.type_alias do
    {
      event: T.nilable(String),
      data: T.nilable(String),
      id: T.nilable(String),
      retry: T.nilable(Integer)
    }
  end
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  # Give every BaseModel subclass an `OrHash` Sorbet alias: the model itself
  # or a plain hash with the same keys.
  [OpenAI::Internal::Type::BaseModel, *OpenAI::Internal::Type::BaseModel.subclasses].each do |cls|
    cls.define_sorbet_constant!(:OrHash) { T.type_alias { T.any(cls, OpenAI::Internal::AnyHash) } }
  end

  # For every enum/union under OpenAI::Models, register the conventional
  # `Tagged*` / `Or*` Sorbet aliases based on the type of its constants.
  OpenAI::Internal::Util.walk_namespaces(OpenAI::Models).each do |mod|
    case mod
    in OpenAI::Internal::Type::Enum | OpenAI::Internal::Type::Union
      mod.constants.each do |name|
        case mod.const_get(name)
        in true | false
          mod.define_sorbet_constant!(:TaggedBoolean) { T.type_alias { T::Boolean } }
          mod.define_sorbet_constant!(:OrBoolean) { T.type_alias { T::Boolean } }
        in Integer
          mod.define_sorbet_constant!(:TaggedInteger) { T.type_alias { Integer } }
          mod.define_sorbet_constant!(:OrInteger) { T.type_alias { Integer } }
        in Float
          mod.define_sorbet_constant!(:TaggedFloat) { T.type_alias { Float } }
          mod.define_sorbet_constant!(:OrFloat) { T.type_alias { Float } }
        in Symbol
          mod.define_sorbet_constant!(:TaggedSymbol) { T.type_alias { Symbol } }
          # Enum inputs accept either the symbol or its string form.
          mod.define_sorbet_constant!(:OrSymbol) { T.type_alias { T.any(Symbol, String) } }
        else
        end
      end
    else
    end
  end

  # Unions additionally get a `Variants` alias, unless they declared one.
  OpenAI::Internal::Util.walk_namespaces(OpenAI::Models)
    .lazy
    .grep(OpenAI::Internal::Type::Union)
    .each do |mod|
      const = :Variants
      next if mod.sorbet_constant_defined?(const)

      mod.define_sorbet_constant!(const) { T.type_alias { mod.to_sorbet_type } }
    end

  # Top-level convenience aliases so `OpenAI::Foo` resolves to
  # `OpenAI::Models::Foo`.
  AllModels = OpenAI::Models::AllModels
  Audio = OpenAI::Models::Audio
  AudioModel = OpenAI::Models::AudioModel
  AudioResponseFormat = OpenAI::Models::AudioResponseFormat
  AutoFileChunkingStrategyParam = OpenAI::Models::AutoFileChunkingStrategyParam
  Batch = OpenAI::Models::Batch
  BatchCancelParams = OpenAI::Models::BatchCancelParams
  BatchCreateParams = OpenAI::Models::BatchCreateParams
  BatchError = OpenAI::Models::BatchError
  BatchListParams = OpenAI::Models::BatchListParams
  BatchRequestCounts = OpenAI::Models::BatchRequestCounts
  BatchRetrieveParams = OpenAI::Models::BatchRetrieveParams
  Beta = OpenAI::Models::Beta
  Chat = OpenAI::Models::Chat
  ChatModel = OpenAI::Models::ChatModel
  ComparisonFilter = OpenAI::Models::ComparisonFilter
  Completion = OpenAI::Models::Completion
  CompletionChoice = OpenAI::Models::CompletionChoice
  CompletionCreateParams = OpenAI::Models::CompletionCreateParams
  CompletionUsage = OpenAI::Models::CompletionUsage
  CompoundFilter = OpenAI::Models::CompoundFilter
  ContainerCreateParams = OpenAI::Models::ContainerCreateParams
  ContainerDeleteParams = OpenAI::Models::ContainerDeleteParams
  ContainerListParams = OpenAI::Models::ContainerListParams
  ContainerRetrieveParams = OpenAI::Models::ContainerRetrieveParams
  Containers = OpenAI::Models::Containers
  CreateEmbeddingResponse = OpenAI::Models::CreateEmbeddingResponse
  Embedding = OpenAI::Models::Embedding
  EmbeddingCreateParams = OpenAI::Models::EmbeddingCreateParams
  EmbeddingModel = OpenAI::Models::EmbeddingModel
  ErrorObject = OpenAI::Models::ErrorObject
  EvalCreateParams = OpenAI::Models::EvalCreateParams
  EvalCustomDataSourceConfig = OpenAI::Models::EvalCustomDataSourceConfig
  EvalDeleteParams = OpenAI::Models::EvalDeleteParams
  EvalListParams = OpenAI::Models::EvalListParams
  EvalRetrieveParams = OpenAI::Models::EvalRetrieveParams
  Evals = OpenAI::Models::Evals
  EvalStoredCompletionsDataSourceConfig = OpenAI::Models::EvalStoredCompletionsDataSourceConfig
  EvalUpdateParams = OpenAI::Models::EvalUpdateParams
  FileChunkingStrategy = OpenAI::Models::FileChunkingStrategy
  FileChunkingStrategyParam = OpenAI::Models::FileChunkingStrategyParam
  FileContent = OpenAI::Models::FileContent
  FileContentParams = OpenAI::Models::FileContentParams
  FileCreateParams = OpenAI::Models::FileCreateParams
  FileDeleted = OpenAI::Models::FileDeleted
  FileDeleteParams = OpenAI::Models::FileDeleteParams
  FileListParams = OpenAI::Models::FileListParams
  FileObject = OpenAI::Models::FileObject
  FilePurpose = OpenAI::Models::FilePurpose
  FileRetrieveParams = OpenAI::Models::FileRetrieveParams
  FineTuning = OpenAI::Models::FineTuning
  FunctionDefinition = OpenAI::Models::FunctionDefinition
  # @type [OpenAI::Internal::Type::Converter]
  FunctionParameters = OpenAI::Models::FunctionParameters
  Graders = OpenAI::Models::Graders
  Image = OpenAI::Models::Image
  ImageCreateVariationParams = OpenAI::Models::ImageCreateVariationParams
  ImageEditCompletedEvent = OpenAI::Models::ImageEditCompletedEvent
  ImageEditParams = OpenAI::Models::ImageEditParams
  ImageEditPartialImageEvent = OpenAI::Models::ImageEditPartialImageEvent
  ImageEditStreamEvent = OpenAI::Models::ImageEditStreamEvent
  ImageGenCompletedEvent = OpenAI::Models::ImageGenCompletedEvent
  ImageGenerateParams = OpenAI::Models::ImageGenerateParams
  ImageGenPartialImageEvent = OpenAI::Models::ImageGenPartialImageEvent
  ImageGenStreamEvent = OpenAI::Models::ImageGenStreamEvent
  ImageModel = OpenAI::Models::ImageModel
  ImagesResponse = OpenAI::Models::ImagesResponse
  # @type [OpenAI::Internal::Type::Converter]
  Metadata = OpenAI::Models::Metadata
  Model = OpenAI::Models::Model
  ModelDeleted = OpenAI::Models::ModelDeleted
  ModelDeleteParams = OpenAI::Models::ModelDeleteParams
  ModelListParams = OpenAI::Models::ModelListParams
  ModelRetrieveParams = OpenAI::Models::ModelRetrieveParams
  Moderation = OpenAI::Models::Moderation
  ModerationCreateParams = OpenAI::Models::ModerationCreateParams
  ModerationImageURLInput = OpenAI::Models::ModerationImageURLInput
  ModerationModel = OpenAI::Models::ModerationModel
  ModerationMultiModalInput = OpenAI::Models::ModerationMultiModalInput
  ModerationTextInput = OpenAI::Models::ModerationTextInput
  OtherFileChunkingStrategyObject = OpenAI::Models::OtherFileChunkingStrategyObject
  Reasoning = OpenAI::Models::Reasoning
  ReasoningEffort = OpenAI::Models::ReasoningEffort
  ResponseFormatJSONObject = OpenAI::Models::ResponseFormatJSONObject
  ResponseFormatJSONSchema = OpenAI::Models::ResponseFormatJSONSchema
  ResponseFormatText = OpenAI::Models::ResponseFormatText
  Responses = OpenAI::Models::Responses
  ResponsesModel = OpenAI::Models::ResponsesModel
  StaticFileChunkingStrategy = OpenAI::Models::StaticFileChunkingStrategy
  StaticFileChunkingStrategyObject = OpenAI::Models::StaticFileChunkingStrategyObject
  StaticFileChunkingStrategyObjectParam = OpenAI::Models::StaticFileChunkingStrategyObjectParam
  Upload = OpenAI::Models::Upload
  UploadCancelParams = OpenAI::Models::UploadCancelParams
  UploadCompleteParams = OpenAI::Models::UploadCompleteParams
  UploadCreateParams = OpenAI::Models::UploadCreateParams
  Uploads = OpenAI::Models::Uploads
  VectorStore = OpenAI::Models::VectorStore
  VectorStoreCreateParams = OpenAI::Models::VectorStoreCreateParams
  VectorStoreDeleted = OpenAI::Models::VectorStoreDeleted
  VectorStoreDeleteParams = OpenAI::Models::VectorStoreDeleteParams
  VectorStoreListParams = OpenAI::Models::VectorStoreListParams
  VectorStoreRetrieveParams = OpenAI::Models::VectorStoreRetrieveParams
  VectorStores = OpenAI::Models::VectorStores
  VectorStoreSearchParams = OpenAI::Models::VectorStoreSearchParams
  VectorStoreUpdateParams = OpenAI::Models::VectorStoreUpdateParams
  Webhooks = OpenAI::Models::Webhooks
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    # Union of every model identifier accepted by the Responses API: a raw
    # string, a chat model, or a responses-only model.
    module AllModels
      extend OpenAI::Internal::Type::Union

      variant String

      variant enum: -> { OpenAI::ChatModel }

      variant enum: -> { OpenAI::AllModels::ResponsesOnlyModel }

      # Models usable only via the Responses API (not Chat Completions).
      module ResponsesOnlyModel
        extend OpenAI::Internal::Type::Enum

        O1_PRO = :"o1-pro"
        O1_PRO_2025_03_19 = :"o1-pro-2025-03-19"
        O3_PRO = :"o3-pro"
        O3_PRO_2025_06_10 = :"o3-pro-2025-06-10"
        O3_DEEP_RESEARCH = :"o3-deep-research"
        O3_DEEP_RESEARCH_2025_06_26 = :"o3-deep-research-2025-06-26"
        O4_MINI_DEEP_RESEARCH = :"o4-mini-deep-research"
        O4_MINI_DEEP_RESEARCH_2025_06_26 = :"o4-mini-deep-research-2025-06-26"
        COMPUTER_USE_PREVIEW = :"computer-use-preview"
        COMPUTER_USE_PREVIEW_2025_03_11 = :"computer-use-preview-2025-03-11"

        # @!method self.values
        #   @return [Array<Symbol>]
      end

      # @!method self.variants
      #   @return [Array(String, Symbol, OpenAI::Models::ChatModel, Symbol, OpenAI::Models::AllModels::ResponsesOnlyModel)]
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Audio
      # Request parameters for generating speech audio from text.
      #
      # @see OpenAI::Resources::Audio::Speech#create
      class SpeechCreateParams < OpenAI::Internal::Type::BaseModel
        extend OpenAI::Internal::Type::RequestParameters::Converter
        include OpenAI::Internal::Type::RequestParameters

        # @!attribute input
        # The text to generate audio for. The maximum length is 4096 characters.
        #
        # @return [String]
        required :input, String

        # @!attribute model
        # One of the available [TTS models](https://platform.openai.com/docs/models#tts):
        # `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.
        #
        # @return [String, Symbol, OpenAI::Models::Audio::SpeechModel]
        required :model, union: -> { OpenAI::Audio::SpeechCreateParams::Model }

        # @!attribute voice
        # The voice to use when generating the audio. Supported voices are `alloy`, `ash`,
        # `ballad`, `coral`, `echo`, `fable`, `onyx`, `nova`, `sage`, `shimmer`, and
        # `verse`. Previews of the voices are available in the
        # [Text to speech guide](https://platform.openai.com/docs/guides/text-to-speech#voice-options).
        #
        # @return [String, Symbol, OpenAI::Models::Audio::SpeechCreateParams::Voice]
        required :voice, union: -> { OpenAI::Audio::SpeechCreateParams::Voice }

        # @!attribute instructions
        # Control the voice of your generated audio with additional instructions. Does not
        # work with `tts-1` or `tts-1-hd`.
        #
        # @return [String, nil]
        optional :instructions, String

        # @!attribute response_format
        # The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`,
        # `wav`, and `pcm`.
        #
        # @return [Symbol, OpenAI::Models::Audio::SpeechCreateParams::ResponseFormat, nil]
        optional :response_format, enum: -> { OpenAI::Audio::SpeechCreateParams::ResponseFormat }

        # @!attribute speed
        # The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
        # the default.
        #
        # @return [Float, nil]
        optional :speed, Float

        # @!attribute stream_format
        # The format to stream the audio in. Supported formats are `sse` and `audio`.
        # `sse` is not supported for `tts-1` or `tts-1-hd`.
        #
        # @return [Symbol, OpenAI::Models::Audio::SpeechCreateParams::StreamFormat, nil]
        optional :stream_format, enum: -> { OpenAI::Audio::SpeechCreateParams::StreamFormat }

        # @!method initialize(input:, model:, voice:, instructions: nil, response_format: nil, speed: nil, stream_format: nil, request_options: {})
        #   See the attribute documentation above for parameter details.
        #
        #   @param input [String] The text to generate audio for.
        #   @param model [String, Symbol, OpenAI::Models::Audio::SpeechModel] TTS model to use.
        #   @param voice [String, Symbol, OpenAI::Models::Audio::SpeechCreateParams::Voice] Voice for the audio.
        #   @param instructions [String] Extra voice-control instructions.
        #   @param response_format [Symbol, OpenAI::Models::Audio::SpeechCreateParams::ResponseFormat] Output audio format.
        #   @param speed [Float] Playback speed, `0.25`..`4.0` (default `1.0`).
        #   @param stream_format [Symbol, OpenAI::Models::Audio::SpeechCreateParams::StreamFormat] Streaming format (`sse` or `audio`).
        #   @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]

        # One of the available [TTS models](https://platform.openai.com/docs/models#tts):
        # `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.
        module Model
          extend OpenAI::Internal::Type::Union

          variant String

          # One of the available [TTS models](https://platform.openai.com/docs/models#tts): `tts-1`, `tts-1-hd` or `gpt-4o-mini-tts`.
          variant enum: -> { OpenAI::Audio::SpeechModel }

          # @!method self.variants
          #   @return [Array(String, Symbol, OpenAI::Models::Audio::SpeechModel)]
        end

        # The voice to use when generating the audio: a known voice constant or
        # any raw string.
        module Voice
          extend OpenAI::Internal::Type::Union

          variant String

          variant const: -> { OpenAI::Models::Audio::SpeechCreateParams::Voice::ALLOY }

          variant const: -> { OpenAI::Models::Audio::SpeechCreateParams::Voice::ASH }

          variant const: -> { OpenAI::Models::Audio::SpeechCreateParams::Voice::BALLAD }

          variant const: -> { OpenAI::Models::Audio::SpeechCreateParams::Voice::CORAL }

          variant const: -> { OpenAI::Models::Audio::SpeechCreateParams::Voice::ECHO }

          variant const: -> { OpenAI::Models::Audio::SpeechCreateParams::Voice::SAGE }

          variant const: -> { OpenAI::Models::Audio::SpeechCreateParams::Voice::SHIMMER }

          variant const: -> { OpenAI::Models::Audio::SpeechCreateParams::Voice::VERSE }

          # @!method self.variants
          #   @return [Array(String, Symbol)]

          define_sorbet_constant!(:Variants) do
            T.type_alias { T.any(String, OpenAI::Audio::SpeechCreateParams::Voice::TaggedSymbol) }
          end

          # @!group

          ALLOY = :alloy
          ASH = :ash
          BALLAD = :ballad
          CORAL = :coral
          ECHO = :echo
          SAGE = :sage
          SHIMMER = :shimmer
          VERSE = :verse

          # @!endgroup
        end

        # The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`,
        # `wav`, and `pcm`.
        module ResponseFormat
          extend OpenAI::Internal::Type::Enum

          MP3 = :mp3
          OPUS = :opus
          AAC = :aac
          FLAC = :flac
          WAV = :wav
          PCM = :pcm

          # @!method self.values
          #   @return [Array<Symbol>]
        end

        # The format to stream the audio in. Supported formats are `sse` and `audio`.
        # `sse` is not supported for `tts-1` or `tts-1-hd`.
        module StreamFormat
          extend OpenAI::Internal::Type::Enum

          SSE = :sse
          AUDIO = :audio

          # @!method self.values
          #   @return [Array<Symbol>]
        end
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Audio
      # Text-to-speech model identifiers accepted by the speech endpoint.
      module SpeechModel
        extend OpenAI::Internal::Type::Enum

        TTS_1 = :"tts-1"
        TTS_1_HD = :"tts-1-hd"
        GPT_4O_MINI_TTS = :"gpt-4o-mini-tts"

        # @!method self.values
        #   @return [Array<Symbol>]
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Audio
-
1
class Transcription < OpenAI::Internal::Type::BaseModel
-
# @!attribute text
-
# The transcribed text.
-
#
-
# @return [String]
-
1
required :text, String
-
-
# @!attribute logprobs
-
# The log probabilities of the tokens in the transcription. Only returned with the
-
# models `gpt-4o-transcribe` and `gpt-4o-mini-transcribe` if `logprobs` is added
-
# to the `include` array.
-
#
-
# @return [Array<OpenAI::Models::Audio::Transcription::Logprob>, nil]
-
1
optional :logprobs, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Audio::Transcription::Logprob] }
-
-
# @!attribute usage
-
# Token usage statistics for the request.
-
#
-
# @return [OpenAI::Models::Audio::Transcription::Usage::Tokens, OpenAI::Models::Audio::Transcription::Usage::Duration, nil]
-
1
optional :usage, union: -> { OpenAI::Audio::Transcription::Usage }
-
-
# @!method initialize(text:, logprobs: nil, usage: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Audio::Transcription} for more details.
-
#
-
# Represents a transcription response returned by model, based on the provided
-
# input.
-
#
-
# @param text [String] The transcribed text.
-
#
-
# @param logprobs [Array<OpenAI::Models::Audio::Transcription::Logprob>] The log probabilities of the tokens in the transcription. Only returned with the
-
#
-
# @param usage [OpenAI::Models::Audio::Transcription::Usage::Tokens, OpenAI::Models::Audio::Transcription::Usage::Duration] Token usage statistics for the request.
-
-
1
class Logprob < OpenAI::Internal::Type::BaseModel
-
# @!attribute token
-
# The token in the transcription.
-
#
-
# @return [String, nil]
-
1
optional :token, String
-
-
# @!attribute bytes
-
# The bytes of the token.
-
#
-
# @return [Array<Float>, nil]
-
1
optional :bytes, OpenAI::Internal::Type::ArrayOf[Float]
-
-
# @!attribute logprob
-
# The log probability of the token.
-
#
-
# @return [Float, nil]
-
1
optional :logprob, Float
-
-
# @!method initialize(token: nil, bytes: nil, logprob: nil)
-
# @param token [String] The token in the transcription.
-
#
-
# @param bytes [Array<Float>] The bytes of the token.
-
#
-
# @param logprob [Float] The log probability of the token.
-
end
-
-
# Token usage statistics for the request.
-
#
-
# @see OpenAI::Models::Audio::Transcription#usage
-
1
module Usage
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
# Usage statistics for models billed by token usage.
-
1
variant :tokens, -> { OpenAI::Audio::Transcription::Usage::Tokens }
-
-
# Usage statistics for models billed by audio input duration.
-
1
variant :duration, -> { OpenAI::Audio::Transcription::Usage::Duration }
-
-
1
class Tokens < OpenAI::Internal::Type::BaseModel
-
# @!attribute input_tokens
-
# Number of input tokens billed for this request.
-
#
-
# @return [Integer]
-
1
required :input_tokens, Integer
-
-
# @!attribute output_tokens
-
# Number of output tokens generated.
-
#
-
# @return [Integer]
-
1
required :output_tokens, Integer
-
-
# @!attribute total_tokens
-
# Total number of tokens used (input + output).
-
#
-
# @return [Integer]
-
1
required :total_tokens, Integer
-
-
# @!attribute type
-
# The type of the usage object. Always `tokens` for this variant.
-
#
-
# @return [Symbol, :tokens]
-
1
required :type, const: :tokens
-
-
# @!attribute input_token_details
-
# Details about the input tokens billed for this request.
-
#
-
# @return [OpenAI::Models::Audio::Transcription::Usage::Tokens::InputTokenDetails, nil]
-
1
optional :input_token_details,
-
-> {
-
OpenAI::Audio::Transcription::Usage::Tokens::InputTokenDetails
-
}
-
-
# @!method initialize(input_tokens:, output_tokens:, total_tokens:, input_token_details: nil, type: :tokens)
-
# Usage statistics for models billed by token usage.
-
#
-
# @param input_tokens [Integer] Number of input tokens billed for this request.
-
#
-
# @param output_tokens [Integer] Number of output tokens generated.
-
#
-
# @param total_tokens [Integer] Total number of tokens used (input + output).
-
#
-
# @param input_token_details [OpenAI::Models::Audio::Transcription::Usage::Tokens::InputTokenDetails] Details about the input tokens billed for this request.
-
#
-
# @param type [Symbol, :tokens] The type of the usage object. Always `tokens` for this variant.
-
-
# @see OpenAI::Models::Audio::Transcription::Usage::Tokens#input_token_details
-
1
class InputTokenDetails < OpenAI::Internal::Type::BaseModel
-
# @!attribute audio_tokens
-
# Number of audio tokens billed for this request.
-
#
-
# @return [Integer, nil]
-
1
optional :audio_tokens, Integer
-
-
# @!attribute text_tokens
-
# Number of text tokens billed for this request.
-
#
-
# @return [Integer, nil]
-
1
optional :text_tokens, Integer
-
-
# @!method initialize(audio_tokens: nil, text_tokens: nil)
-
# Details about the input tokens billed for this request.
-
#
-
# @param audio_tokens [Integer] Number of audio tokens billed for this request.
-
#
-
# @param text_tokens [Integer] Number of text tokens billed for this request.
-
end
-
end
-
-
1
# Usage statistics for models billed by audio input duration.
class Duration < OpenAI::Internal::Type::BaseModel
  # @!attribute seconds
  #   Duration of the input audio in seconds.
  #
  #   @return [Float]
  required :seconds, Float

  # @!attribute type
  #   The type of the usage object. Always `duration` for this variant.
  #
  #   @return [Symbol, :duration]
  required :type, const: :duration

  # @!method initialize(seconds:, type: :duration)
  #   Usage statistics for models billed by audio input duration.
  #
  #   @param seconds [Float] Duration of the input audio in seconds.
  #
  #   @param type [Symbol, :duration] The type of the usage object. Always `duration` for this variant.
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Audio::Transcription::Usage::Tokens, OpenAI::Models::Audio::Transcription::Usage::Duration)]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Audio
      # Request parameters for creating a transcription from an audio file.
      #
      # @see OpenAI::Resources::Audio::Transcriptions#create
      #
      # @see OpenAI::Resources::Audio::Transcriptions#create_streaming
      class TranscriptionCreateParams < OpenAI::Internal::Type::BaseModel
        extend OpenAI::Internal::Type::RequestParameters::Converter
        include OpenAI::Internal::Type::RequestParameters

        # @!attribute file
        #   The audio file object (not file name) to transcribe, in one of these formats:
        #   flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
        #
        #   @return [Pathname, StringIO, IO, String, OpenAI::FilePart]
        required :file, OpenAI::Internal::Type::FileInput

        # @!attribute model
        #   ID of the model to use. The options are `gpt-4o-transcribe`,
        #   `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source
        #   Whisper V2 model).
        #
        #   @return [String, Symbol, OpenAI::Models::AudioModel]
        required :model, union: -> { OpenAI::Audio::TranscriptionCreateParams::Model }

        # @!attribute chunking_strategy
        #   Controls how the audio is cut into chunks. When set to `"auto"`, the server
        #   first normalizes loudness and then uses voice activity detection (VAD) to choose
        #   boundaries. `server_vad` object can be provided to tweak VAD detection
        #   parameters manually. If unset, the audio is transcribed as a single block.
        #
        #   @return [Symbol, :auto, OpenAI::Models::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig, nil]
        optional :chunking_strategy,
                 union: -> { OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy },
                 nil?: true

        # @!attribute include
        #   Additional information to include in the transcription response. `logprobs` will
        #   return the log probabilities of the tokens in the response to understand the
        #   model's confidence in the transcription. `logprobs` only works with
        #   response_format set to `json` and only with the models `gpt-4o-transcribe` and
        #   `gpt-4o-mini-transcribe`.
        #
        #   @return [Array<Symbol, OpenAI::Models::Audio::TranscriptionInclude>, nil]
        optional :include, -> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Audio::TranscriptionInclude] }

        # @!attribute language
        #   The language of the input audio. Supplying the input language in
        #   [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) (e.g. `en`)
        #   format will improve accuracy and latency.
        #
        #   @return [String, nil]
        optional :language, String

        # @!attribute prompt
        #   An optional text to guide the model's style or continue a previous audio
        #   segment. The
        #   [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
        #   should match the audio language.
        #
        #   @return [String, nil]
        optional :prompt, String

        # @!attribute response_format
        #   The format of the output, in one of these options: `json`, `text`, `srt`,
        #   `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`,
        #   the only supported format is `json`.
        #
        #   @return [Symbol, OpenAI::Models::AudioResponseFormat, nil]
        optional :response_format, enum: -> { OpenAI::AudioResponseFormat }

        # @!attribute temperature
        #   The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
        #   output more random, while lower values like 0.2 will make it more focused and
        #   deterministic. If set to 0, the model will use
        #   [log probability](https://en.wikipedia.org/wiki/Log_probability) to
        #   automatically increase the temperature until certain thresholds are hit.
        #
        #   @return [Float, nil]
        optional :temperature, Float

        # @!attribute timestamp_granularities
        #   The timestamp granularities to populate for this transcription.
        #   `response_format` must be set `verbose_json` to use timestamp granularities.
        #   Either or both of these options are supported: `word`, or `segment`. Note: There
        #   is no additional latency for segment timestamps, but generating word timestamps
        #   incurs additional latency.
        #
        #   @return [Array<Symbol, OpenAI::Models::Audio::TranscriptionCreateParams::TimestampGranularity>, nil]
        optional :timestamp_granularities,
                 -> {
                   OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Audio::TranscriptionCreateParams::TimestampGranularity]
                 }

        # @!method initialize(file:, model:, chunking_strategy: nil, include: nil, language: nil, prompt: nil, response_format: nil, temperature: nil, timestamp_granularities: nil, request_options: {})
        #   Some parameter documentations has been truncated, see
        #   {OpenAI::Models::Audio::TranscriptionCreateParams} for more details.
        #
        #   @param file [Pathname, StringIO, IO, String, OpenAI::FilePart] The audio file object (not file name) to transcribe, in one of these formats: fl
        #
        #   @param model [String, Symbol, OpenAI::Models::AudioModel] ID of the model to use. The options are `gpt-4o-transcribe`, `gpt-4o-mini-transc
        #
        #   @param chunking_strategy [Symbol, :auto, OpenAI::Models::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig, nil] Controls how the audio is cut into chunks. When set to `"auto"`, the server firs
        #
        #   @param include [Array<Symbol, OpenAI::Models::Audio::TranscriptionInclude>] Additional information to include in the transcription response.
        #
        #   @param language [String] The language of the input audio. Supplying the input language in [ISO-639-1](htt
        #
        #   @param prompt [String] An optional text to guide the model's style or continue a previous audio segment
        #
        #   @param response_format [Symbol, OpenAI::Models::AudioResponseFormat] The format of the output, in one of these options: `json`, `text`, `srt`, `verbo
        #
        #   @param temperature [Float] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
        #
        #   @param timestamp_granularities [Array<Symbol, OpenAI::Models::Audio::TranscriptionCreateParams::TimestampGranularity>] The timestamp granularities to populate for this transcription. `response_format
        #
        #   @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]

        # ID of the model to use. The options are `gpt-4o-transcribe`,
        # `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source
        # Whisper V2 model).
        module Model
          extend OpenAI::Internal::Type::Union

          # Any model ID may be given as a plain string.
          variant String

          # ID of the model to use. The options are `gpt-4o-transcribe`, `gpt-4o-mini-transcribe`, and `whisper-1` (which is powered by our open source Whisper V2 model).
          variant enum: -> { OpenAI::AudioModel }

          # @!method self.variants
          #   @return [Array(String, Symbol, OpenAI::Models::AudioModel)]
        end

        # Controls how the audio is cut into chunks. When set to `"auto"`, the server
        # first normalizes loudness and then uses voice activity detection (VAD) to choose
        # boundaries. `server_vad` object can be provided to tweak VAD detection
        # parameters manually. If unset, the audio is transcribed as a single block.
        module ChunkingStrategy
          extend OpenAI::Internal::Type::Union

          # Automatically set chunking parameters based on the audio. Must be set to `"auto"`.
          variant const: :auto

          variant -> { OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig }

          # Manual voice-activity-detection configuration for server-side chunking.
          class VadConfig < OpenAI::Internal::Type::BaseModel
            # @!attribute type
            #   Must be set to `server_vad` to enable manual chunking using server side VAD.
            #
            #   @return [Symbol, OpenAI::Models::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig::Type]
            required :type,
                     enum: -> {
                       OpenAI::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig::Type
                     }

            # @!attribute prefix_padding_ms
            #   Amount of audio to include before the VAD detected speech (in milliseconds).
            #
            #   @return [Integer, nil]
            optional :prefix_padding_ms, Integer

            # @!attribute silence_duration_ms
            #   Duration of silence to detect speech stop (in milliseconds). With shorter values
            #   the model will respond more quickly, but may jump in on short pauses from the
            #   user.
            #
            #   @return [Integer, nil]
            optional :silence_duration_ms, Integer

            # @!attribute threshold
            #   Sensitivity threshold (0.0 to 1.0) for voice activity detection. A higher
            #   threshold will require louder audio to activate the model, and thus might
            #   perform better in noisy environments.
            #
            #   @return [Float, nil]
            optional :threshold, Float

            # @!method initialize(type:, prefix_padding_ms: nil, silence_duration_ms: nil, threshold: nil)
            #   Some parameter documentations has been truncated, see
            #   {OpenAI::Models::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig}
            #   for more details.
            #
            #   @param type [Symbol, OpenAI::Models::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig::Type] Must be set to `server_vad` to enable manual chunking using server side VAD.
            #
            #   @param prefix_padding_ms [Integer] Amount of audio to include before the VAD detected speech (in
            #
            #   @param silence_duration_ms [Integer] Duration of silence to detect speech stop (in milliseconds).
            #
            #   @param threshold [Float] Sensitivity threshold (0.0 to 1.0) for voice activity detection. A

            # Must be set to `server_vad` to enable manual chunking using server side VAD.
            #
            # @see OpenAI::Models::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig#type
            module Type
              extend OpenAI::Internal::Type::Enum

              SERVER_VAD = :server_vad

              # @!method self.values
              #   @return [Array<Symbol>]
            end
          end

          # @!method self.variants
          #   @return [Array(Symbol, :auto, OpenAI::Models::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig)]
        end

        # Granularity options for the `timestamp_granularities[]` request parameter.
        module TimestampGranularity
          extend OpenAI::Internal::Type::Enum

          WORD = :word
          SEGMENT = :segment

          # @!method self.values
          #   @return [Array<Symbol>]
        end
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Audio
      # Represents a transcription response returned by model, based on the provided
      # input.
      #
      # @see OpenAI::Resources::Audio::Transcriptions#create
      #
      # @see OpenAI::Resources::Audio::Transcriptions#create_streaming
      module TranscriptionCreateResponse
        extend OpenAI::Internal::Type::Union

        # Represents a transcription response returned by model, based on the provided input.
        variant -> { OpenAI::Audio::Transcription }

        # Represents a verbose json transcription response returned by model, based on the provided input.
        variant -> { OpenAI::Audio::TranscriptionVerbose }

        # @!method self.variants
        #   @return [Array(OpenAI::Models::Audio::Transcription, OpenAI::Models::Audio::TranscriptionVerbose)]
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Audio
      # Additional information that may be requested in a transcription response via
      # the `include[]` request parameter.
      module TranscriptionInclude
        extend OpenAI::Internal::Type::Enum

        LOGPROBS = :logprobs

        # @!method self.values
        #   @return [Array<Symbol>]
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Audio
      # One segment of transcribed audio, carrying its text together with timing and
      # decoding statistics.
      class TranscriptionSegment < OpenAI::Internal::Type::BaseModel
        # @!attribute id
        #   Unique identifier of the segment.
        #
        #   @return [Integer]
        required :id, Integer

        # @!attribute avg_logprob
        #   Average logprob of the segment. If the value is lower than -1, consider the
        #   logprobs failed.
        #
        #   @return [Float]
        required :avg_logprob, Float

        # @!attribute compression_ratio
        #   Compression ratio of the segment. If the value is greater than 2.4, consider the
        #   compression failed.
        #
        #   @return [Float]
        required :compression_ratio, Float

        # @!attribute end_
        #   End time of the segment in seconds.
        #
        #   @return [Float]
        required :end_, Float, api_name: :end

        # @!attribute no_speech_prob
        #   Probability of no speech in the segment. If the value is higher than 1.0 and the
        #   `avg_logprob` is below -1, consider this segment silent.
        #
        #   @return [Float]
        required :no_speech_prob, Float

        # @!attribute seek
        #   Seek offset of the segment.
        #
        #   @return [Integer]
        required :seek, Integer

        # @!attribute start
        #   Start time of the segment in seconds.
        #
        #   @return [Float]
        required :start, Float

        # @!attribute temperature
        #   Temperature parameter used for generating the segment.
        #
        #   @return [Float]
        required :temperature, Float

        # @!attribute text
        #   Text content of the segment.
        #
        #   @return [String]
        required :text, String

        # @!attribute tokens
        #   Array of token IDs for the text content.
        #
        #   @return [Array<Integer>]
        required :tokens, OpenAI::Internal::Type::ArrayOf[Integer]

        # @!method initialize(id:, avg_logprob:, compression_ratio:, end_:, no_speech_prob:, seek:, start:, temperature:, text:, tokens:)
        #   Some parameter documentations has been truncated, see
        #   {OpenAI::Models::Audio::TranscriptionSegment} for more details.
        #
        #   @param id [Integer] Unique identifier of the segment.
        #
        #   @param avg_logprob [Float] Average logprob of the segment. If the value is lower than -1, consider the logp
        #
        #   @param compression_ratio [Float] Compression ratio of the segment. If the value is greater than 2.4, consider the
        #
        #   @param end_ [Float] End time of the segment in seconds.
        #
        #   @param no_speech_prob [Float] Probability of no speech in the segment. If the value is higher than 1.0 and the
        #
        #   @param seek [Integer] Seek offset of the segment.
        #
        #   @param start [Float] Start time of the segment in seconds.
        #
        #   @param temperature [Float] Temperature parameter used for generating the segment.
        #
        #   @param text [String] Text content of the segment.
        #
        #   @param tokens [Array<Integer>] Array of token IDs for the text content.
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Audio
      # Emitted when there is an additional text delta. This is also the first event
      # emitted when the transcription starts. Only emitted when you
      # [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription)
      # with the `Stream` parameter set to `true`.
      module TranscriptionStreamEvent
        extend OpenAI::Internal::Type::Union

        # Variants are selected by the `type` field of the incoming payload.
        discriminator :type

        # Emitted when there is an additional text delta. This is also the first event emitted when the transcription starts. Only emitted when you [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) with the `Stream` parameter set to `true`.
        variant :"transcript.text.delta", -> { OpenAI::Audio::TranscriptionTextDeltaEvent }

        # Emitted when the transcription is complete. Contains the complete transcription text. Only emitted when you [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription) with the `Stream` parameter set to `true`.
        variant :"transcript.text.done", -> { OpenAI::Audio::TranscriptionTextDoneEvent }

        # @!method self.variants
        #   @return [Array(OpenAI::Models::Audio::TranscriptionTextDeltaEvent, OpenAI::Models::Audio::TranscriptionTextDoneEvent)]
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Audio
      # Emitted when there is an additional text delta. This is also the first event
      # emitted when the transcription starts. Only emitted when you
      # [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription)
      # with the `Stream` parameter set to `true`.
      class TranscriptionTextDeltaEvent < OpenAI::Internal::Type::BaseModel
        # @!attribute delta
        #   The text delta that was additionally transcribed.
        #
        #   @return [String]
        required :delta, String

        # @!attribute type
        #   The type of the event. Always `transcript.text.delta`.
        #
        #   @return [Symbol, :"transcript.text.delta"]
        required :type, const: :"transcript.text.delta"

        # @!attribute logprobs
        #   The log probabilities of the delta. Only included if you
        #   [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription)
        #   with the `include[]` parameter set to `logprobs`.
        #
        #   @return [Array<OpenAI::Models::Audio::TranscriptionTextDeltaEvent::Logprob>, nil]
        optional :logprobs,
                 -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Audio::TranscriptionTextDeltaEvent::Logprob] }

        # @!method initialize(delta:, logprobs: nil, type: :"transcript.text.delta")
        #   Some parameter documentations has been truncated, see
        #   {OpenAI::Models::Audio::TranscriptionTextDeltaEvent} for more details.
        #
        #   Emitted when there is an additional text delta. This is also the first event
        #   emitted when the transcription starts. Only emitted when you
        #   [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription)
        #   with the `Stream` parameter set to `true`.
        #
        #   @param delta [String] The text delta that was additionally transcribed.
        #
        #   @param logprobs [Array<OpenAI::Models::Audio::TranscriptionTextDeltaEvent::Logprob>] The log probabilities of the delta. Only included if you [create a transcription
        #
        #   @param type [Symbol, :"transcript.text.delta"] The type of the event. Always `transcript.text.delta`.

        # Per-token log-probability information attached to a delta event.
        class Logprob < OpenAI::Internal::Type::BaseModel
          # @!attribute token
          #   The token that was used to generate the log probability.
          #
          #   @return [String, nil]
          optional :token, String

          # @!attribute bytes
          #   The bytes that were used to generate the log probability.
          #
          #   @return [Array<Integer>, nil]
          optional :bytes, OpenAI::Internal::Type::ArrayOf[Integer]

          # @!attribute logprob
          #   The log probability of the token.
          #
          #   @return [Float, nil]
          optional :logprob, Float

          # @!method initialize(token: nil, bytes: nil, logprob: nil)
          #   Some parameter documentations has been truncated, see
          #   {OpenAI::Models::Audio::TranscriptionTextDeltaEvent::Logprob} for more details.
          #
          #   @param token [String] The token that was used to generate the log probability.
          #
          #   @param bytes [Array<Integer>] The bytes that were used to generate the log probability.
          #
          #   @param logprob [Float] The log probability of the token.
        end
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Audio
      # Emitted when the transcription is complete. Contains the complete transcription
      # text. Only emitted when you
      # [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription)
      # with the `Stream` parameter set to `true`.
      class TranscriptionTextDoneEvent < OpenAI::Internal::Type::BaseModel
        # @!attribute text
        #   The text that was transcribed.
        #
        #   @return [String]
        required :text, String

        # @!attribute type
        #   The type of the event. Always `transcript.text.done`.
        #
        #   @return [Symbol, :"transcript.text.done"]
        required :type, const: :"transcript.text.done"

        # @!attribute logprobs
        #   The log probabilities of the individual tokens in the transcription. Only
        #   included if you
        #   [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription)
        #   with the `include[]` parameter set to `logprobs`.
        #
        #   @return [Array<OpenAI::Models::Audio::TranscriptionTextDoneEvent::Logprob>, nil]
        optional :logprobs,
                 -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Audio::TranscriptionTextDoneEvent::Logprob] }

        # @!attribute usage
        #   Usage statistics for models billed by token usage.
        #
        #   @return [OpenAI::Models::Audio::TranscriptionTextDoneEvent::Usage, nil]
        optional :usage, -> { OpenAI::Audio::TranscriptionTextDoneEvent::Usage }

        # @!method initialize(text:, logprobs: nil, usage: nil, type: :"transcript.text.done")
        #   Some parameter documentations has been truncated, see
        #   {OpenAI::Models::Audio::TranscriptionTextDoneEvent} for more details.
        #
        #   Emitted when the transcription is complete. Contains the complete transcription
        #   text. Only emitted when you
        #   [create a transcription](https://platform.openai.com/docs/api-reference/audio/create-transcription)
        #   with the `Stream` parameter set to `true`.
        #
        #   @param text [String] The text that was transcribed.
        #
        #   @param logprobs [Array<OpenAI::Models::Audio::TranscriptionTextDoneEvent::Logprob>] The log probabilities of the individual tokens in the transcription. Only includ
        #
        #   @param usage [OpenAI::Models::Audio::TranscriptionTextDoneEvent::Usage] Usage statistics for models billed by token usage.
        #
        #   @param type [Symbol, :"transcript.text.done"] The type of the event. Always `transcript.text.done`.

        # Per-token log-probability information attached to a done event.
        class Logprob < OpenAI::Internal::Type::BaseModel
          # @!attribute token
          #   The token that was used to generate the log probability.
          #
          #   @return [String, nil]
          optional :token, String

          # @!attribute bytes
          #   The bytes that were used to generate the log probability.
          #
          #   @return [Array<Integer>, nil]
          optional :bytes, OpenAI::Internal::Type::ArrayOf[Integer]

          # @!attribute logprob
          #   The log probability of the token.
          #
          #   @return [Float, nil]
          optional :logprob, Float

          # @!method initialize(token: nil, bytes: nil, logprob: nil)
          #   Some parameter documentations has been truncated, see
          #   {OpenAI::Models::Audio::TranscriptionTextDoneEvent::Logprob} for more details.
          #
          #   @param token [String] The token that was used to generate the log probability.
          #
          #   @param bytes [Array<Integer>] The bytes that were used to generate the log probability.
          #
          #   @param logprob [Float] The log probability of the token.
        end

        # @see OpenAI::Models::Audio::TranscriptionTextDoneEvent#usage
        class Usage < OpenAI::Internal::Type::BaseModel
          # @!attribute input_tokens
          #   Number of input tokens billed for this request.
          #
          #   @return [Integer]
          required :input_tokens, Integer

          # @!attribute output_tokens
          #   Number of output tokens generated.
          #
          #   @return [Integer]
          required :output_tokens, Integer

          # @!attribute total_tokens
          #   Total number of tokens used (input + output).
          #
          #   @return [Integer]
          required :total_tokens, Integer

          # @!attribute type
          #   The type of the usage object. Always `tokens` for this variant.
          #
          #   @return [Symbol, :tokens]
          required :type, const: :tokens

          # @!attribute input_token_details
          #   Details about the input tokens billed for this request.
          #
          #   @return [OpenAI::Models::Audio::TranscriptionTextDoneEvent::Usage::InputTokenDetails, nil]
          optional :input_token_details,
                   -> {
                     OpenAI::Audio::TranscriptionTextDoneEvent::Usage::InputTokenDetails
                   }

          # @!method initialize(input_tokens:, output_tokens:, total_tokens:, input_token_details: nil, type: :tokens)
          #   Usage statistics for models billed by token usage.
          #
          #   @param input_tokens [Integer] Number of input tokens billed for this request.
          #
          #   @param output_tokens [Integer] Number of output tokens generated.
          #
          #   @param total_tokens [Integer] Total number of tokens used (input + output).
          #
          #   @param input_token_details [OpenAI::Models::Audio::TranscriptionTextDoneEvent::Usage::InputTokenDetails] Details about the input tokens billed for this request.
          #
          #   @param type [Symbol, :tokens] The type of the usage object. Always `tokens` for this variant.

          # @see OpenAI::Models::Audio::TranscriptionTextDoneEvent::Usage#input_token_details
          class InputTokenDetails < OpenAI::Internal::Type::BaseModel
            # @!attribute audio_tokens
            #   Number of audio tokens billed for this request.
            #
            #   @return [Integer, nil]
            optional :audio_tokens, Integer

            # @!attribute text_tokens
            #   Number of text tokens billed for this request.
            #
            #   @return [Integer, nil]
            optional :text_tokens, Integer

            # @!method initialize(audio_tokens: nil, text_tokens: nil)
            #   Details about the input tokens billed for this request.
            #
            #   @param audio_tokens [Integer] Number of audio tokens billed for this request.
            #
            #   @param text_tokens [Integer] Number of text tokens billed for this request.
          end
        end
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Audio
      # Represents a verbose json transcription response returned by model, based on
      # the provided input.
      class TranscriptionVerbose < OpenAI::Internal::Type::BaseModel
        # @!attribute duration
        #   The duration of the input audio.
        #
        #   @return [Float]
        required :duration, Float

        # @!attribute language
        #   The language of the input audio.
        #
        #   @return [String]
        required :language, String

        # @!attribute text
        #   The transcribed text.
        #
        #   @return [String]
        required :text, String

        # @!attribute segments
        #   Segments of the transcribed text and their corresponding details.
        #
        #   @return [Array<OpenAI::Models::Audio::TranscriptionSegment>, nil]
        optional :segments, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Audio::TranscriptionSegment] }

        # @!attribute usage
        #   Usage statistics for models billed by audio input duration.
        #
        #   @return [OpenAI::Models::Audio::TranscriptionVerbose::Usage, nil]
        optional :usage, -> { OpenAI::Audio::TranscriptionVerbose::Usage }

        # @!attribute words
        #   Extracted words and their corresponding timestamps.
        #
        #   @return [Array<OpenAI::Models::Audio::TranscriptionWord>, nil]
        optional :words, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Audio::TranscriptionWord] }

        # @!method initialize(duration:, language:, text:, segments: nil, usage: nil, words: nil)
        #   Represents a verbose json transcription response returned by model, based on the
        #   provided input.
        #
        #   @param duration [Float] The duration of the input audio.
        #
        #   @param language [String] The language of the input audio.
        #
        #   @param text [String] The transcribed text.
        #
        #   @param segments [Array<OpenAI::Models::Audio::TranscriptionSegment>] Segments of the transcribed text and their corresponding details.
        #
        #   @param usage [OpenAI::Models::Audio::TranscriptionVerbose::Usage] Usage statistics for models billed by audio input duration.
        #
        #   @param words [Array<OpenAI::Models::Audio::TranscriptionWord>] Extracted words and their corresponding timestamps.

        # @see OpenAI::Models::Audio::TranscriptionVerbose#usage
        class Usage < OpenAI::Internal::Type::BaseModel
          # @!attribute seconds
          #   Duration of the input audio in seconds.
          #
          #   @return [Float]
          required :seconds, Float

          # @!attribute type
          #   The type of the usage object. Always `duration` for this variant.
          #
          #   @return [Symbol, :duration]
          required :type, const: :duration

          # @!method initialize(seconds:, type: :duration)
          #   Usage statistics for models billed by audio input duration.
          #
          #   @param seconds [Float] Duration of the input audio in seconds.
          #
          #   @param type [Symbol, :duration] The type of the usage object. Always `duration` for this variant.
        end
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Audio
      # A single word extracted from the transcription, with its start/end timestamps.
      class TranscriptionWord < OpenAI::Internal::Type::BaseModel
        # @!attribute end_
        #   End time of the word in seconds.
        #
        #   @return [Float]
        required :end_, Float, api_name: :end

        # @!attribute start
        #   Start time of the word in seconds.
        #
        #   @return [Float]
        required :start, Float

        # @!attribute word
        #   The text content of the word.
        #
        #   @return [String]
        required :word, String

        # @!method initialize(end_:, start:, word:)
        #   @param end_ [Float] End time of the word in seconds.
        #
        #   @param start [Float] Start time of the word in seconds.
        #
        #   @param word [String] The text content of the word.
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Audio
      # A translation response containing only the translated text.
      class Translation < OpenAI::Internal::Type::BaseModel
        # @!attribute text
        #   The translated text.
        #
        #   @return [String]
        required :text, String

        # @!method initialize(text:)
        #   @param text [String]
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Audio
      # Request parameters for translating an audio file into English.
      #
      # @see OpenAI::Resources::Audio::Translations#create
      class TranslationCreateParams < OpenAI::Internal::Type::BaseModel
        extend OpenAI::Internal::Type::RequestParameters::Converter
        include OpenAI::Internal::Type::RequestParameters

        # @!attribute file
        #   The audio file object (not file name) translate, in one of these formats: flac,
        #   mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
        #
        #   @return [Pathname, StringIO, IO, String, OpenAI::FilePart]
        required :file, OpenAI::Internal::Type::FileInput

        # @!attribute model
        #   ID of the model to use. Only `whisper-1` (which is powered by our open source
        #   Whisper V2 model) is currently available.
        #
        #   @return [String, Symbol, OpenAI::Models::AudioModel]
        required :model, union: -> { OpenAI::Audio::TranslationCreateParams::Model }

        # @!attribute prompt
        #   An optional text to guide the model's style or continue a previous audio
        #   segment. The
        #   [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
        #   should be in English.
        #
        #   @return [String, nil]
        optional :prompt, String

        # @!attribute response_format
        #   The format of the output, in one of these options: `json`, `text`, `srt`,
        #   `verbose_json`, or `vtt`.
        #
        #   @return [Symbol, OpenAI::Models::Audio::TranslationCreateParams::ResponseFormat, nil]
        optional :response_format, enum: -> { OpenAI::Audio::TranslationCreateParams::ResponseFormat }

        # @!attribute temperature
        #   The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
        #   output more random, while lower values like 0.2 will make it more focused and
        #   deterministic. If set to 0, the model will use
        #   [log probability](https://en.wikipedia.org/wiki/Log_probability) to
        #   automatically increase the temperature until certain thresholds are hit.
        #
        #   @return [Float, nil]
        optional :temperature, Float

        # @!method initialize(file:, model:, prompt: nil, response_format: nil, temperature: nil, request_options: {})
        #   Some parameter documentations has been truncated, see
        #   {OpenAI::Models::Audio::TranslationCreateParams} for more details.
        #
        #   @param file [Pathname, StringIO, IO, String, OpenAI::FilePart] The audio file object (not file name) translate, in one of these formats: flac,
        #
        #   @param model [String, Symbol, OpenAI::Models::AudioModel] ID of the model to use. Only `whisper-1` (which is powered by our open source Wh
        #
        #   @param prompt [String] An optional text to guide the model's style or continue a previous audio segment
        #
        #   @param response_format [Symbol, OpenAI::Models::Audio::TranslationCreateParams::ResponseFormat] The format of the output, in one of these options: `json`, `text`, `srt`, `verbo
        #
        #   @param temperature [Float] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
        #
        #   @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]

        # ID of the model to use. Only `whisper-1` (which is powered by our open source
        # Whisper V2 model) is currently available.
        module Model
          extend OpenAI::Internal::Type::Union

          # Any model ID may be given as a plain string.
          variant String

          # ID of the model to use. Only `whisper-1` (which is powered by our open source Whisper V2 model) is currently available.
          variant enum: -> { OpenAI::AudioModel }

          # @!method self.variants
          #   @return [Array(String, Symbol, OpenAI::Models::AudioModel)]
        end

        # The format of the output, in one of these options: `json`, `text`, `srt`,
        # `verbose_json`, or `vtt`.
        module ResponseFormat
          extend OpenAI::Internal::Type::Enum

          JSON = :json
          TEXT = :text
          SRT = :srt
          VERBOSE_JSON = :verbose_json
          VTT = :vtt

          # @!method self.values
          #   @return [Array<Symbol>]
        end
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Audio
      # Union of the possible translation response shapes.
      #
      # @see OpenAI::Resources::Audio::Translations#create
      module TranslationCreateResponse
        extend OpenAI::Internal::Type::Union

        variant -> { OpenAI::Audio::Translation }

        variant -> { OpenAI::Audio::TranslationVerbose }

        # @!method self.variants
        #   @return [Array(OpenAI::Models::Audio::Translation, OpenAI::Models::Audio::TranslationVerbose)]
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Audio
-
1
class TranslationVerbose < OpenAI::Internal::Type::BaseModel
-
# @!attribute duration
-
# The duration of the input audio.
-
#
-
# @return [Float]
-
1
required :duration, Float
-
-
# @!attribute language
-
# The language of the output translation (always `english`).
-
#
-
# @return [String]
-
1
required :language, String
-
-
# @!attribute text
-
# The translated text.
-
#
-
# @return [String]
-
1
required :text, String
-
-
# @!attribute segments
-
# Segments of the translated text and their corresponding details.
-
#
-
# @return [Array<OpenAI::Models::Audio::TranscriptionSegment>, nil]
-
1
optional :segments, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Audio::TranscriptionSegment] }
-
-
# @!method initialize(duration:, language:, text:, segments: nil)
-
# @param duration [Float] The duration of the input audio.
-
#
-
# @param language [String] The language of the output translation (always `english`).
-
#
-
# @param text [String] The translated text.
-
#
-
# @param segments [Array<OpenAI::Models::Audio::TranscriptionSegment>] Segments of the translated text and their corresponding details.
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module AudioModel
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
WHISPER_1 = :"whisper-1"
-
1
GPT_4O_TRANSCRIBE = :"gpt-4o-transcribe"
-
1
GPT_4O_MINI_TRANSCRIBE = :"gpt-4o-mini-transcribe"
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# The format of the output, in one of these options: `json`, `text`, `srt`,
-
# `verbose_json`, or `vtt`. For `gpt-4o-transcribe` and `gpt-4o-mini-transcribe`,
-
# the only supported format is `json`.
-
1
module AudioResponseFormat
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
JSON = :json
-
1
TEXT = :text
-
1
SRT = :srt
-
1
VERBOSE_JSON = :verbose_json
-
1
VTT = :vtt
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
class AutoFileChunkingStrategyParam < OpenAI::Internal::Type::BaseModel
-
# @!attribute type
-
# Always `auto`.
-
#
-
# @return [Symbol, :auto]
-
1
required :type, const: :auto
-
-
# @!method initialize(type: :auto)
-
# The default strategy. This strategy currently uses a `max_chunk_size_tokens` of
-
# `800` and `chunk_overlap_tokens` of `400`.
-
#
-
# @param type [Symbol, :auto] Always `auto`.
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# @see OpenAI::Resources::Batches#create
-
1
class Batch < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute completion_window
-
# The time frame within which the batch should be processed.
-
#
-
# @return [String]
-
1
required :completion_window, String
-
-
# @!attribute created_at
-
# The Unix timestamp (in seconds) for when the batch was created.
-
#
-
# @return [Integer]
-
1
required :created_at, Integer
-
-
# @!attribute endpoint
-
# The OpenAI API endpoint used by the batch.
-
#
-
# @return [String]
-
1
required :endpoint, String
-
-
# @!attribute input_file_id
-
# The ID of the input file for the batch.
-
#
-
# @return [String]
-
1
required :input_file_id, String
-
-
# @!attribute object
-
# The object type, which is always `batch`.
-
#
-
# @return [Symbol, :batch]
-
1
required :object, const: :batch
-
-
# @!attribute status
-
# The current status of the batch.
-
#
-
# @return [Symbol, OpenAI::Models::Batch::Status]
-
1
required :status, enum: -> { OpenAI::Batch::Status }
-
-
# @!attribute cancelled_at
-
# The Unix timestamp (in seconds) for when the batch was cancelled.
-
#
-
# @return [Integer, nil]
-
1
optional :cancelled_at, Integer
-
-
# @!attribute cancelling_at
-
# The Unix timestamp (in seconds) for when the batch started cancelling.
-
#
-
# @return [Integer, nil]
-
1
optional :cancelling_at, Integer
-
-
# @!attribute completed_at
-
# The Unix timestamp (in seconds) for when the batch was completed.
-
#
-
# @return [Integer, nil]
-
1
optional :completed_at, Integer
-
-
# @!attribute error_file_id
-
# The ID of the file containing the outputs of requests with errors.
-
#
-
# @return [String, nil]
-
1
optional :error_file_id, String
-
-
# @!attribute errors
-
#
-
# @return [OpenAI::Models::Batch::Errors, nil]
-
1
optional :errors, -> { OpenAI::Batch::Errors }
-
-
# @!attribute expired_at
-
# The Unix timestamp (in seconds) for when the batch expired.
-
#
-
# @return [Integer, nil]
-
1
optional :expired_at, Integer
-
-
# @!attribute expires_at
-
# The Unix timestamp (in seconds) for when the batch will expire.
-
#
-
# @return [Integer, nil]
-
1
optional :expires_at, Integer
-
-
# @!attribute failed_at
-
# The Unix timestamp (in seconds) for when the batch failed.
-
#
-
# @return [Integer, nil]
-
1
optional :failed_at, Integer
-
-
# @!attribute finalizing_at
-
# The Unix timestamp (in seconds) for when the batch started finalizing.
-
#
-
# @return [Integer, nil]
-
1
optional :finalizing_at, Integer
-
-
# @!attribute in_progress_at
-
# The Unix timestamp (in seconds) for when the batch started processing.
-
#
-
# @return [Integer, nil]
-
1
optional :in_progress_at, Integer
-
-
# @!attribute metadata
-
# Set of 16 key-value pairs that can be attached to an object. This can be useful
-
# for storing additional information about the object in a structured format, and
-
# querying for objects via API or the dashboard.
-
#
-
# Keys are strings with a maximum length of 64 characters. Values are strings with
-
# a maximum length of 512 characters.
-
#
-
# @return [Hash{Symbol=>String}, nil]
-
1
optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
-
-
# @!attribute output_file_id
-
# The ID of the file containing the outputs of successfully executed requests.
-
#
-
# @return [String, nil]
-
1
optional :output_file_id, String
-
-
# @!attribute request_counts
-
# The request counts for different statuses within the batch.
-
#
-
# @return [OpenAI::Models::BatchRequestCounts, nil]
-
1
optional :request_counts, -> { OpenAI::BatchRequestCounts }
-
-
# @!method initialize(id:, completion_window:, created_at:, endpoint:, input_file_id:, status:, cancelled_at: nil, cancelling_at: nil, completed_at: nil, error_file_id: nil, errors: nil, expired_at: nil, expires_at: nil, failed_at: nil, finalizing_at: nil, in_progress_at: nil, metadata: nil, output_file_id: nil, request_counts: nil, object: :batch)
-
# Some parameter documentations has been truncated, see {OpenAI::Models::Batch}
-
# for more details.
-
#
-
# @param id [String]
-
#
-
# @param completion_window [String] The time frame within which the batch should be processed.
-
#
-
# @param created_at [Integer] The Unix timestamp (in seconds) for when the batch was created.
-
#
-
# @param endpoint [String] The OpenAI API endpoint used by the batch.
-
#
-
# @param input_file_id [String] The ID of the input file for the batch.
-
#
-
# @param status [Symbol, OpenAI::Models::Batch::Status] The current status of the batch.
-
#
-
# @param cancelled_at [Integer] The Unix timestamp (in seconds) for when the batch was cancelled.
-
#
-
# @param cancelling_at [Integer] The Unix timestamp (in seconds) for when the batch started cancelling.
-
#
-
# @param completed_at [Integer] The Unix timestamp (in seconds) for when the batch was completed.
-
#
-
# @param error_file_id [String] The ID of the file containing the outputs of requests with errors.
-
#
-
# @param errors [OpenAI::Models::Batch::Errors]
-
#
-
# @param expired_at [Integer] The Unix timestamp (in seconds) for when the batch expired.
-
#
-
# @param expires_at [Integer] The Unix timestamp (in seconds) for when the batch will expire.
-
#
-
# @param failed_at [Integer] The Unix timestamp (in seconds) for when the batch failed.
-
#
-
# @param finalizing_at [Integer] The Unix timestamp (in seconds) for when the batch started finalizing.
-
#
-
# @param in_progress_at [Integer] The Unix timestamp (in seconds) for when the batch started processing.
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
#
-
# @param output_file_id [String] The ID of the file containing the outputs of successfully executed requests.
-
#
-
# @param request_counts [OpenAI::Models::BatchRequestCounts] The request counts for different statuses within the batch.
-
#
-
# @param object [Symbol, :batch] The object type, which is always `batch`.
-
-
# The current status of the batch.
-
#
-
# @see OpenAI::Models::Batch#status
-
1
module Status
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
VALIDATING = :validating
-
1
FAILED = :failed
-
1
IN_PROGRESS = :in_progress
-
1
FINALIZING = :finalizing
-
1
COMPLETED = :completed
-
1
EXPIRED = :expired
-
1
CANCELLING = :cancelling
-
1
CANCELLED = :cancelled
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
# @see OpenAI::Models::Batch#errors
-
1
class Errors < OpenAI::Internal::Type::BaseModel
-
# @!attribute data
-
#
-
# @return [Array<OpenAI::Models::BatchError>, nil]
-
1
optional :data, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::BatchError] }
-
-
# @!attribute object
-
# The object type, which is always `list`.
-
#
-
# @return [String, nil]
-
1
optional :object, String
-
-
# @!method initialize(data: nil, object: nil)
-
# @param data [Array<OpenAI::Models::BatchError>]
-
#
-
# @param object [String] The object type, which is always `list`.
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# @see OpenAI::Resources::Batches#cancel
-
1
class BatchCancelParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!method initialize(request_options: {})
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# @see OpenAI::Resources::Batches#create
-
1
class BatchCreateParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!attribute completion_window
-
# The time frame within which the batch should be processed. Currently only `24h`
-
# is supported.
-
#
-
# @return [Symbol, OpenAI::Models::BatchCreateParams::CompletionWindow]
-
1
required :completion_window, enum: -> { OpenAI::BatchCreateParams::CompletionWindow }
-
-
# @!attribute endpoint
-
# The endpoint to be used for all requests in the batch. Currently
-
# `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions`
-
# are supported. Note that `/v1/embeddings` batches are also restricted to a
-
# maximum of 50,000 embedding inputs across all requests in the batch.
-
#
-
# @return [Symbol, OpenAI::Models::BatchCreateParams::Endpoint]
-
1
required :endpoint, enum: -> { OpenAI::BatchCreateParams::Endpoint }
-
-
# @!attribute input_file_id
-
# The ID of an uploaded file that contains requests for the new batch.
-
#
-
# See [upload file](https://platform.openai.com/docs/api-reference/files/create)
-
# for how to upload a file.
-
#
-
# Your input file must be formatted as a
-
# [JSONL file](https://platform.openai.com/docs/api-reference/batch/request-input),
-
# and must be uploaded with the purpose `batch`. The file can contain up to 50,000
-
# requests, and can be up to 200 MB in size.
-
#
-
# @return [String]
-
1
required :input_file_id, String
-
-
# @!attribute metadata
-
# Set of 16 key-value pairs that can be attached to an object. This can be useful
-
# for storing additional information about the object in a structured format, and
-
# querying for objects via API or the dashboard.
-
#
-
# Keys are strings with a maximum length of 64 characters. Values are strings with
-
# a maximum length of 512 characters.
-
#
-
# @return [Hash{Symbol=>String}, nil]
-
1
optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
-
-
# @!method initialize(completion_window:, endpoint:, input_file_id:, metadata: nil, request_options: {})
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::BatchCreateParams} for more details.
-
#
-
# @param completion_window [Symbol, OpenAI::Models::BatchCreateParams::CompletionWindow] The time frame within which the batch should be processed. Currently only `24h`
-
#
-
# @param endpoint [Symbol, OpenAI::Models::BatchCreateParams::Endpoint] The endpoint to be used for all requests in the batch. Currently `/v1/responses`
-
#
-
# @param input_file_id [String] The ID of an uploaded file that contains requests for the new batch.
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
-
# The time frame within which the batch should be processed. Currently only `24h`
-
# is supported.
-
1
module CompletionWindow
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
COMPLETION_WINDOW_24H = :"24h"
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
# The endpoint to be used for all requests in the batch. Currently
-
# `/v1/responses`, `/v1/chat/completions`, `/v1/embeddings`, and `/v1/completions`
-
# are supported. Note that `/v1/embeddings` batches are also restricted to a
-
# maximum of 50,000 embedding inputs across all requests in the batch.
-
1
module Endpoint
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
V1_RESPONSES = :"/v1/responses"
-
1
V1_CHAT_COMPLETIONS = :"/v1/chat/completions"
-
1
V1_EMBEDDINGS = :"/v1/embeddings"
-
1
V1_COMPLETIONS = :"/v1/completions"
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
class BatchError < OpenAI::Internal::Type::BaseModel
-
# @!attribute code
-
# An error code identifying the error type.
-
#
-
# @return [String, nil]
-
1
optional :code, String
-
-
# @!attribute line
-
# The line number of the input file where the error occurred, if applicable.
-
#
-
# @return [Integer, nil]
-
1
optional :line, Integer, nil?: true
-
-
# @!attribute message
-
# A human-readable message providing more details about the error.
-
#
-
# @return [String, nil]
-
1
optional :message, String
-
-
# @!attribute param
-
# The name of the parameter that caused the error, if applicable.
-
#
-
# @return [String, nil]
-
1
optional :param, String, nil?: true
-
-
# @!method initialize(code: nil, line: nil, message: nil, param: nil)
-
# @param code [String] An error code identifying the error type.
-
#
-
# @param line [Integer, nil] The line number of the input file where the error occurred, if applicable.
-
#
-
# @param message [String] A human-readable message providing more details about the error.
-
#
-
# @param param [String, nil] The name of the parameter that caused the error, if applicable.
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# @see OpenAI::Resources::Batches#list
-
1
class BatchListParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!attribute after
-
# A cursor for use in pagination. `after` is an object ID that defines your place
-
# in the list. For instance, if you make a list request and receive 100 objects,
-
# ending with obj_foo, your subsequent call can include after=obj_foo in order to
-
# fetch the next page of the list.
-
#
-
# @return [String, nil]
-
1
optional :after, String
-
-
# @!attribute limit
-
# A limit on the number of objects to be returned. Limit can range between 1 and
-
# 100, and the default is 20.
-
#
-
# @return [Integer, nil]
-
1
optional :limit, Integer
-
-
# @!method initialize(after: nil, limit: nil, request_options: {})
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::BatchListParams} for more details.
-
#
-
# @param after [String] A cursor for use in pagination. `after` is an object ID that defines your place
-
#
-
# @param limit [Integer] A limit on the number of objects to be returned. Limit can range between 1 and 1
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
class BatchRequestCounts < OpenAI::Internal::Type::BaseModel
-
# @!attribute completed
-
# Number of requests that have been completed successfully.
-
#
-
# @return [Integer]
-
1
required :completed, Integer
-
-
# @!attribute failed
-
# Number of requests that have failed.
-
#
-
# @return [Integer]
-
1
required :failed, Integer
-
-
# @!attribute total
-
# Total number of requests in the batch.
-
#
-
# @return [Integer]
-
1
required :total, Integer
-
-
# @!method initialize(completed:, failed:, total:)
-
# The request counts for different statuses within the batch.
-
#
-
# @param completed [Integer] Number of requests that have been completed successfully.
-
#
-
# @param failed [Integer] Number of requests that have failed.
-
#
-
# @param total [Integer] Total number of requests in the batch.
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# @see OpenAI::Resources::Batches#retrieve
-
1
class BatchRetrieveParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!method initialize(request_options: {})
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
# @see OpenAI::Resources::Beta::Assistants#create
-
1
class Assistant < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The identifier, which can be referenced in API endpoints.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute created_at
-
# The Unix timestamp (in seconds) for when the assistant was created.
-
#
-
# @return [Integer]
-
1
required :created_at, Integer
-
-
# @!attribute description
-
# The description of the assistant. The maximum length is 512 characters.
-
#
-
# @return [String, nil]
-
1
required :description, String, nil?: true
-
-
# @!attribute instructions
-
# The system instructions that the assistant uses. The maximum length is 256,000
-
# characters.
-
#
-
# @return [String, nil]
-
1
required :instructions, String, nil?: true
-
-
# @!attribute metadata
-
# Set of 16 key-value pairs that can be attached to an object. This can be useful
-
# for storing additional information about the object in a structured format, and
-
# querying for objects via API or the dashboard.
-
#
-
# Keys are strings with a maximum length of 64 characters. Values are strings with
-
# a maximum length of 512 characters.
-
#
-
# @return [Hash{Symbol=>String}, nil]
-
1
required :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
-
-
# @!attribute model
-
# ID of the model to use. You can use the
-
# [List models](https://platform.openai.com/docs/api-reference/models/list) API to
-
# see all of your available models, or see our
-
# [Model overview](https://platform.openai.com/docs/models) for descriptions of
-
# them.
-
#
-
# @return [String]
-
1
required :model, String
-
-
# @!attribute name
-
# The name of the assistant. The maximum length is 256 characters.
-
#
-
# @return [String, nil]
-
1
required :name, String, nil?: true
-
-
# @!attribute object
-
# The object type, which is always `assistant`.
-
#
-
# @return [Symbol, :assistant]
-
1
required :object, const: :assistant
-
-
# @!attribute tools
-
# A list of tool enabled on the assistant. There can be a maximum of 128 tools per
-
# assistant. Tools can be of types `code_interpreter`, `file_search`, or
-
# `function`.
-
#
-
# @return [Array<OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::FileSearchTool, OpenAI::Models::Beta::FunctionTool>]
-
1
required :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::AssistantTool] }
-
-
# @!attribute response_format
-
# Specifies the format that the model must output. Compatible with
-
# [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
-
# [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
-
# and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
-
#
-
# Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
-
# Outputs which ensures the model will match your supplied JSON schema. Learn more
-
# in the
-
# [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
-
#
-
# Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
-
# message the model generates is valid JSON.
-
#
-
# **Important:** when using JSON mode, you **must** also instruct the model to
-
# produce JSON yourself via a system or user message. Without this, the model may
-
# generate an unending stream of whitespace until the generation reaches the token
-
# limit, resulting in a long-running and seemingly "stuck" request. Also note that
-
# the message content may be partially cut off if `finish_reason="length"`, which
-
# indicates the generation exceeded `max_tokens` or the conversation exceeded the
-
# max context length.
-
#
-
# @return [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil]
-
1
optional :response_format, union: -> { OpenAI::Beta::AssistantResponseFormatOption }, nil?: true
-
-
# @!attribute temperature
-
# What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
-
# make the output more random, while lower values like 0.2 will make it more
-
# focused and deterministic.
-
#
-
# @return [Float, nil]
-
1
optional :temperature, Float, nil?: true
-
-
# @!attribute tool_resources
-
# A set of resources that are used by the assistant's tools. The resources are
-
# specific to the type of tool. For example, the `code_interpreter` tool requires
-
# a list of file IDs, while the `file_search` tool requires a list of vector store
-
# IDs.
-
#
-
# @return [OpenAI::Models::Beta::Assistant::ToolResources, nil]
-
1
optional :tool_resources, -> { OpenAI::Beta::Assistant::ToolResources }, nil?: true
-
-
# @!attribute top_p
-
# An alternative to sampling with temperature, called nucleus sampling, where the
-
# model considers the results of the tokens with top_p probability mass. So 0.1
-
# means only the tokens comprising the top 10% probability mass are considered.
-
#
-
# We generally recommend altering this or temperature but not both.
-
#
-
# @return [Float, nil]
-
1
optional :top_p, Float, nil?: true
-
-
# @!method initialize(id:, created_at:, description:, instructions:, metadata:, model:, name:, tools:, response_format: nil, temperature: nil, tool_resources: nil, top_p: nil, object: :assistant)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::Assistant} for more details.
-
#
-
# Represents an `assistant` that can call the model and use tools.
-
#
-
# @param id [String] The identifier, which can be referenced in API endpoints.
-
#
-
# @param created_at [Integer] The Unix timestamp (in seconds) for when the assistant was created.
-
#
-
# @param description [String, nil] The description of the assistant. The maximum length is 512 characters.
-
#
-
# @param instructions [String, nil] The system instructions that the assistant uses. The maximum length is 256,000 c
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
#
-
# @param model [String] ID of the model to use. You can use the [List models](https://platform.openai.co
-
#
-
# @param name [String, nil] The name of the assistant. The maximum length is 256 characters.
-
#
-
# @param tools [Array<OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::FileSearchTool, OpenAI::Models::Beta::FunctionTool>] A list of tool enabled on the assistant. There can be a maximum of 128 tools per
-
#
-
# @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] Specifies the format that the model must output. Compatible with [GPT-4o](https:
-
#
-
# @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
-
#
-
# @param tool_resources [OpenAI::Models::Beta::Assistant::ToolResources, nil] A set of resources that are used by the assistant's tools. The resources are spe
-
#
-
# @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling, where the
-
#
-
# @param object [Symbol, :assistant] The object type, which is always `assistant`.
-
-
# @see OpenAI::Models::Beta::Assistant#tool_resources
-
1
class ToolResources < OpenAI::Internal::Type::BaseModel
-
# @!attribute code_interpreter
-
#
-
# @return [OpenAI::Models::Beta::Assistant::ToolResources::CodeInterpreter, nil]
-
1
optional :code_interpreter, -> { OpenAI::Beta::Assistant::ToolResources::CodeInterpreter }
-
-
# @!attribute file_search
-
#
-
# @return [OpenAI::Models::Beta::Assistant::ToolResources::FileSearch, nil]
-
1
optional :file_search, -> { OpenAI::Beta::Assistant::ToolResources::FileSearch }
-
-
# @!method initialize(code_interpreter: nil, file_search: nil)
-
# A set of resources that are used by the assistant's tools. The resources are
-
# specific to the type of tool. For example, the `code_interpreter` tool requires
-
# a list of file IDs, while the `file_search` tool requires a list of vector store
-
# IDs.
-
#
-
# @param code_interpreter [OpenAI::Models::Beta::Assistant::ToolResources::CodeInterpreter]
-
# @param file_search [OpenAI::Models::Beta::Assistant::ToolResources::FileSearch]
-
-
# @see OpenAI::Models::Beta::Assistant::ToolResources#code_interpreter
-
1
class CodeInterpreter < OpenAI::Internal::Type::BaseModel
-
# @!attribute file_ids
-
# A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
-
# available to the `code_interpreter`` tool. There can be a maximum of 20 files
-
# associated with the tool.
-
#
-
# @return [Array<String>, nil]
-
1
optional :file_ids, OpenAI::Internal::Type::ArrayOf[String]
-
-
# @!method initialize(file_ids: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::Assistant::ToolResources::CodeInterpreter} for more
-
# details.
-
#
-
# @param file_ids [Array<String>] A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
-
end
-
-
# @see OpenAI::Models::Beta::Assistant::ToolResources#file_search
-
1
class FileSearch < OpenAI::Internal::Type::BaseModel
-
# @!attribute vector_store_ids
-
# The ID of the
-
# [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
-
# attached to this assistant. There can be a maximum of 1 vector store attached to
-
# the assistant.
-
#
-
# @return [Array<String>, nil]
-
1
optional :vector_store_ids, OpenAI::Internal::Type::ArrayOf[String]
-
-
# @!method initialize(vector_store_ids: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::Assistant::ToolResources::FileSearch} for more details.
-
#
-
# @param vector_store_ids [Array<String>] The ID of the [vector store](https://platform.openai.com/docs/api-reference/vect
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
# @see OpenAI::Resources::Beta::Assistants#create
-
1
class AssistantCreateParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!attribute model
-
# ID of the model to use. You can use the
-
# [List models](https://platform.openai.com/docs/api-reference/models/list) API to
-
# see all of your available models, or see our
-
# [Model overview](https://platform.openai.com/docs/models) for descriptions of
-
# them.
-
#
-
# @return [String, Symbol, OpenAI::Models::ChatModel]
-
1
required :model, union: -> { OpenAI::Beta::AssistantCreateParams::Model }
-
-
# @!attribute description
-
# The description of the assistant. The maximum length is 512 characters.
-
#
-
# @return [String, nil]
-
1
optional :description, String, nil?: true
-
-
# @!attribute instructions
-
# The system instructions that the assistant uses. The maximum length is 256,000
-
# characters.
-
#
-
# @return [String, nil]
-
1
optional :instructions, String, nil?: true
-
-
# @!attribute metadata
-
# Set of 16 key-value pairs that can be attached to an object. This can be useful
-
# for storing additional information about the object in a structured format, and
-
# querying for objects via API or the dashboard.
-
#
-
# Keys are strings with a maximum length of 64 characters. Values are strings with
-
# a maximum length of 512 characters.
-
#
-
# @return [Hash{Symbol=>String}, nil]
-
1
optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
-
-
# @!attribute name
-
# The name of the assistant. The maximum length is 256 characters.
-
#
-
# @return [String, nil]
-
1
optional :name, String, nil?: true
-
-
# @!attribute reasoning_effort
-
# **o-series models only**
-
#
-
# Constrains effort on reasoning for
-
# [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-
# supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
-
# result in faster responses and fewer tokens used on reasoning in a response.
-
#
-
# @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
-
1
optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
-
-
# @!attribute response_format
-
# Specifies the format that the model must output. Compatible with
-
# [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
-
# [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
-
# and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
-
#
-
# Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
-
# Outputs which ensures the model will match your supplied JSON schema. Learn more
-
# in the
-
# [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
-
#
-
# Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
-
# message the model generates is valid JSON.
-
#
-
# **Important:** when using JSON mode, you **must** also instruct the model to
-
# produce JSON yourself via a system or user message. Without this, the model may
-
# generate an unending stream of whitespace until the generation reaches the token
-
# limit, resulting in a long-running and seemingly "stuck" request. Also note that
-
# the message content may be partially cut off if `finish_reason="length"`, which
-
# indicates the generation exceeded `max_tokens` or the conversation exceeded the
-
# max context length.
-
#
-
# @return [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil]
-
1
optional :response_format, union: -> { OpenAI::Beta::AssistantResponseFormatOption }, nil?: true
-
-
# @!attribute temperature
-
# What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
-
# make the output more random, while lower values like 0.2 will make it more
-
# focused and deterministic.
-
#
-
# @return [Float, nil]
-
1
optional :temperature, Float, nil?: true
-
-
# @!attribute tool_resources
-
# A set of resources that are used by the assistant's tools. The resources are
-
# specific to the type of tool. For example, the `code_interpreter` tool requires
-
# a list of file IDs, while the `file_search` tool requires a list of vector store
-
# IDs.
-
#
-
# @return [OpenAI::Models::Beta::AssistantCreateParams::ToolResources, nil]
-
1
optional :tool_resources, -> { OpenAI::Beta::AssistantCreateParams::ToolResources }, nil?: true
-
-
# @!attribute tools
-
# A list of tool enabled on the assistant. There can be a maximum of 128 tools per
-
# assistant. Tools can be of types `code_interpreter`, `file_search`, or
-
# `function`.
-
#
-
# @return [Array<OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::FileSearchTool, OpenAI::Models::Beta::FunctionTool>, nil]
-
1
optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::AssistantTool] }
-
-
# @!attribute top_p
-
# An alternative to sampling with temperature, called nucleus sampling, where the
-
# model considers the results of the tokens with top_p probability mass. So 0.1
-
# means only the tokens comprising the top 10% probability mass are considered.
-
#
-
# We generally recommend altering this or temperature but not both.
-
#
-
# @return [Float, nil]
-
1
optional :top_p, Float, nil?: true
-
-
# @!method initialize(model:, description: nil, instructions: nil, metadata: nil, name: nil, reasoning_effort: nil, response_format: nil, temperature: nil, tool_resources: nil, tools: nil, top_p: nil, request_options: {})
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::AssistantCreateParams} for more details.
-
#
-
# @param model [String, Symbol, OpenAI::Models::ChatModel] ID of the model to use. You can use the [List models](https://platform.openai.co
-
#
-
# @param description [String, nil] The description of the assistant. The maximum length is 512 characters.
-
#
-
# @param instructions [String, nil] The system instructions that the assistant uses. The maximum length is 256,000 c
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
#
-
# @param name [String, nil] The name of the assistant. The maximum length is 256 characters.
-
#
-
# @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] **o-series models only**
-
#
-
# @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] Specifies the format that the model must output. Compatible with [GPT-4o](https:
-
#
-
# @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
-
#
-
# @param tool_resources [OpenAI::Models::Beta::AssistantCreateParams::ToolResources, nil] A set of resources that are used by the assistant's tools. The resources are spe
-
#
-
# @param tools [Array<OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::FileSearchTool, OpenAI::Models::Beta::FunctionTool>] A list of tool enabled on the assistant. There can be a maximum of 128 tools per
-
#
-
# @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling, where the
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
-
# ID of the model to use. You can use the
-
# [List models](https://platform.openai.com/docs/api-reference/models/list) API to
-
# see all of your available models, or see our
-
# [Model overview](https://platform.openai.com/docs/models) for descriptions of
-
# them.
-
1
module Model
-
1
extend OpenAI::Internal::Type::Union
-
-
1
variant String
-
-
# ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models) for descriptions of them.
-
1
variant enum: -> { OpenAI::ChatModel }
-
-
# @!method self.variants
-
# @return [Array(String, Symbol, OpenAI::Models::ChatModel)]
-
end
-
-
1
class ToolResources < OpenAI::Internal::Type::BaseModel
-
# @!attribute code_interpreter
-
#
-
# @return [OpenAI::Models::Beta::AssistantCreateParams::ToolResources::CodeInterpreter, nil]
-
1
optional :code_interpreter,
-
-> {
-
OpenAI::Beta::AssistantCreateParams::ToolResources::CodeInterpreter
-
}
-
-
# @!attribute file_search
-
#
-
# @return [OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch, nil]
-
1
optional :file_search, -> { OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch }
-
-
# @!method initialize(code_interpreter: nil, file_search: nil)
-
# A set of resources that are used by the assistant's tools. The resources are
-
# specific to the type of tool. For example, the `code_interpreter` tool requires
-
# a list of file IDs, while the `file_search` tool requires a list of vector store
-
# IDs.
-
#
-
# @param code_interpreter [OpenAI::Models::Beta::AssistantCreateParams::ToolResources::CodeInterpreter]
-
# @param file_search [OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch]
-
-
# @see OpenAI::Models::Beta::AssistantCreateParams::ToolResources#code_interpreter
-
1
class CodeInterpreter < OpenAI::Internal::Type::BaseModel
-
# @!attribute file_ids
-
# A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
-
# available to the `code_interpreter` tool. There can be a maximum of 20 files
-
# associated with the tool.
-
#
-
# @return [Array<String>, nil]
-
1
optional :file_ids, OpenAI::Internal::Type::ArrayOf[String]
-
-
# @!method initialize(file_ids: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::AssistantCreateParams::ToolResources::CodeInterpreter}
-
# for more details.
-
#
-
# @param file_ids [Array<String>] A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
-
end
-
-
# @see OpenAI::Models::Beta::AssistantCreateParams::ToolResources#file_search
-
1
class FileSearch < OpenAI::Internal::Type::BaseModel
-
# @!attribute vector_store_ids
-
# The
-
# [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
-
# attached to this assistant. There can be a maximum of 1 vector store attached to
-
# the assistant.
-
#
-
# @return [Array<String>, nil]
-
1
optional :vector_store_ids, OpenAI::Internal::Type::ArrayOf[String]
-
-
# @!attribute vector_stores
-
# A helper to create a
-
# [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
-
# with file_ids and attach it to this assistant. There can be a maximum of 1
-
# vector store attached to the assistant.
-
#
-
# @return [Array<OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore>, nil]
-
1
optional :vector_stores,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore]
-
}
-
-
# @!method initialize(vector_store_ids: nil, vector_stores: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch} for
-
# more details.
-
#
-
# @param vector_store_ids [Array<String>] The [vector store](https://platform.openai.com/docs/api-reference/vector-stores/
-
#
-
# @param vector_stores [Array<OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore>] A helper to create a [vector store](https://platform.openai.com/docs/api-referen
-
-
1
class VectorStore < OpenAI::Internal::Type::BaseModel
-
# @!attribute chunking_strategy
-
# The chunking strategy used to chunk the file(s). If not set, will use the `auto`
-
# strategy.
-
#
-
# @return [OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static, nil]
-
1
optional :chunking_strategy,
-
union: -> {
-
OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy
-
}
-
-
# @!attribute file_ids
-
# A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
-
# add to the vector store. There can be a maximum of 10000 files in a vector
-
# store.
-
#
-
# @return [Array<String>, nil]
-
1
optional :file_ids, OpenAI::Internal::Type::ArrayOf[String]
-
-
# @!attribute metadata
-
# Set of 16 key-value pairs that can be attached to an object. This can be useful
-
# for storing additional information about the object in a structured format, and
-
# querying for objects via API or the dashboard.
-
#
-
# Keys are strings with a maximum length of 64 characters. Values are strings with
-
# a maximum length of 512 characters.
-
#
-
# @return [Hash{Symbol=>String}, nil]
-
1
optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
-
-
# @!method initialize(chunking_strategy: nil, file_ids: nil, metadata: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore}
-
# for more details.
-
#
-
# @param chunking_strategy [OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static] The chunking strategy used to chunk the file(s). If not set, will use the `auto`
-
#
-
# @param file_ids [Array<String>] A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to ad
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
-
# The chunking strategy used to chunk the file(s). If not set, will use the `auto`
-
# strategy.
-
#
-
# @see OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore#chunking_strategy
-
1
module ChunkingStrategy
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
# The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`.
-
1
variant :auto,
-
-> {
-
OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto
-
}
-
-
1
variant :static,
-
-> {
-
OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static
-
}
-
-
1
class Auto < OpenAI::Internal::Type::BaseModel
-
# @!attribute type
-
# Always `auto`.
-
#
-
# @return [Symbol, :auto]
-
1
required :type, const: :auto
-
-
# @!method initialize(type: :auto)
-
# The default strategy. This strategy currently uses a `max_chunk_size_tokens` of
-
# `800` and `chunk_overlap_tokens` of `400`.
-
#
-
# @param type [Symbol, :auto] Always `auto`.
-
end
-
-
1
class Static < OpenAI::Internal::Type::BaseModel
-
# @!attribute static
-
#
-
# @return [OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static]
-
1
required :static,
-
-> {
-
OpenAI::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static
-
}
-
-
# @!attribute type
-
# Always `static`.
-
#
-
# @return [Symbol, :static]
-
1
required :type, const: :static
-
-
# @!method initialize(static:, type: :static)
-
# @param static [OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static]
-
#
-
# @param type [Symbol, :static] Always `static`.
-
-
# @see OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static#static
-
1
class Static < OpenAI::Internal::Type::BaseModel
-
# @!attribute chunk_overlap_tokens
-
# The number of tokens that overlap between chunks. The default value is `400`.
-
#
-
# Note that the overlap must not exceed half of `max_chunk_size_tokens`.
-
#
-
# @return [Integer]
-
1
required :chunk_overlap_tokens, Integer
-
-
# @!attribute max_chunk_size_tokens
-
# The maximum number of tokens in each chunk. The default value is `800`. The
-
# minimum value is `100` and the maximum value is `4096`.
-
#
-
# @return [Integer]
-
1
required :max_chunk_size_tokens, Integer
-
-
# @!method initialize(chunk_overlap_tokens:, max_chunk_size_tokens:)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static}
-
# for more details.
-
#
-
# @param chunk_overlap_tokens [Integer] The number of tokens that overlap between chunks. The default value is `400`.
-
#
-
# @param max_chunk_size_tokens [Integer] The maximum number of tokens in each chunk. The default value is `800`. The mini
-
end
-
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, OpenAI::Models::Beta::AssistantCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static)]
-
end
-
end
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
# @see OpenAI::Resources::Beta::Assistants#delete
-
1
class AssistantDeleteParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!method initialize(request_options: {})
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
# @see OpenAI::Resources::Beta::Assistants#delete
-
1
class AssistantDeleted < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute deleted
-
#
-
# @return [Boolean]
-
1
required :deleted, OpenAI::Internal::Type::Boolean
-
-
# @!attribute object
-
#
-
# @return [Symbol, :"assistant.deleted"]
-
1
required :object, const: :"assistant.deleted"
-
-
# @!method initialize(id:, deleted:, object: :"assistant.deleted")
-
# @param id [String]
-
# @param deleted [Boolean]
-
# @param object [Symbol, :"assistant.deleted"]
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
# @see OpenAI::Resources::Beta::Assistants#list
-
1
class AssistantListParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!attribute after
-
# A cursor for use in pagination. `after` is an object ID that defines your place
-
# in the list. For instance, if you make a list request and receive 100 objects,
-
# ending with obj_foo, your subsequent call can include after=obj_foo in order to
-
# fetch the next page of the list.
-
#
-
# @return [String, nil]
-
1
optional :after, String
-
-
# @!attribute before
-
# A cursor for use in pagination. `before` is an object ID that defines your place
-
# in the list. For instance, if you make a list request and receive 100 objects,
-
# starting with obj_foo, your subsequent call can include before=obj_foo in order
-
# to fetch the previous page of the list.
-
#
-
# @return [String, nil]
-
1
optional :before, String
-
-
# @!attribute limit
-
# A limit on the number of objects to be returned. Limit can range between 1 and
-
# 100, and the default is 20.
-
#
-
# @return [Integer, nil]
-
1
optional :limit, Integer
-
-
# @!attribute order
-
# Sort order by the `created_at` timestamp of the objects. `asc` for ascending
-
# order and `desc` for descending order.
-
#
-
# @return [Symbol, OpenAI::Models::Beta::AssistantListParams::Order, nil]
-
1
optional :order, enum: -> { OpenAI::Beta::AssistantListParams::Order }
-
-
# @!method initialize(after: nil, before: nil, limit: nil, order: nil, request_options: {})
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::AssistantListParams} for more details.
-
#
-
# @param after [String] A cursor for use in pagination. `after` is an object ID that defines your place
-
#
-
# @param before [String] A cursor for use in pagination. `before` is an object ID that defines your place
-
#
-
# @param limit [Integer] A limit on the number of objects to be returned. Limit can range between 1 and 1
-
#
-
# @param order [Symbol, OpenAI::Models::Beta::AssistantListParams::Order] Sort order by the `created_at` timestamp of the objects. `asc` for ascending ord
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
-
# Sort order by the `created_at` timestamp of the objects. `asc` for ascending
-
# order and `desc` for descending order.
-
1
module Order
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
ASC = :asc
-
1
DESC = :desc
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
# Specifies the format that the model must output. Compatible with
-
# [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
-
# [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
-
# and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
-
#
-
# Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
-
# Outputs which ensures the model will match your supplied JSON schema. Learn more
-
# in the
-
# [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
-
#
-
# Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
-
# message the model generates is valid JSON.
-
#
-
# **Important:** when using JSON mode, you **must** also instruct the model to
-
# produce JSON yourself via a system or user message. Without this, the model may
-
# generate an unending stream of whitespace until the generation reaches the token
-
# limit, resulting in a long-running and seemingly "stuck" request. Also note that
-
# the message content may be partially cut off if `finish_reason="length"`, which
-
# indicates the generation exceeded `max_tokens` or the conversation exceeded the
-
# max context length.
-
1
module AssistantResponseFormatOption
-
1
extend OpenAI::Internal::Type::Union
-
-
# `auto` is the default value
-
1
variant const: :auto
-
-
# Default response format. Used to generate text responses.
-
1
variant -> { OpenAI::ResponseFormatText }
-
-
# JSON object response format. An older method of generating JSON responses.
-
# Using `json_schema` is recommended for models that support it. Note that the
-
# model will not generate JSON without a system or user message instructing it
-
# to do so.
-
1
variant -> { OpenAI::ResponseFormatJSONObject }
-
-
# JSON Schema response format. Used to generate structured JSON responses.
-
# Learn more about [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs).
-
1
variant -> { OpenAI::ResponseFormatJSONSchema }
-
-
# @!method self.variants
-
# @return [Array(Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema)]
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
# @see OpenAI::Resources::Beta::Assistants#retrieve
-
1
class AssistantRetrieveParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!method initialize(request_options: {})
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
# Represents an event emitted when streaming a Run.
-
#
-
# Each event in a server-sent events stream has an `event` and `data` property:
-
#
-
# ```
-
# event: thread.created
-
# data: {"id": "thread_123", "object": "thread", ...}
-
# ```
-
#
-
# We emit events whenever a new object is created, transitions to a new state, or
-
# is being streamed in parts (deltas). For example, we emit `thread.run.created`
-
# when a new run is created, `thread.run.completed` when a run completes, and so
-
# on. When an Assistant chooses to create a message during a run, we emit a
-
# `thread.message.created event`, a `thread.message.in_progress` event, many
-
# `thread.message.delta` events, and finally a `thread.message.completed` event.
-
#
-
# We may add additional events over time, so we recommend handling unknown events
-
# gracefully in your code. See the
-
# [Assistants API quickstart](https://platform.openai.com/docs/assistants/overview)
-
# to learn how to integrate the Assistants API with streaming.
-
1
module AssistantStreamEvent
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :event
-
-
# Occurs when a new [thread](https://platform.openai.com/docs/api-reference/threads/object) is created.
-
1
variant :"thread.created", -> { OpenAI::Beta::AssistantStreamEvent::ThreadCreated }
-
-
# Occurs when a new [run](https://platform.openai.com/docs/api-reference/runs/object) is created.
-
1
variant :"thread.run.created", -> { OpenAI::Beta::AssistantStreamEvent::ThreadRunCreated }
-
-
# Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to a `queued` status.
-
1
variant :"thread.run.queued", -> { OpenAI::Beta::AssistantStreamEvent::ThreadRunQueued }
-
-
# Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to an `in_progress` status.
-
1
variant :"thread.run.in_progress", -> { OpenAI::Beta::AssistantStreamEvent::ThreadRunInProgress }
-
-
# Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to a `requires_action` status.
-
1
variant :"thread.run.requires_action",
-
-> {
-
OpenAI::Beta::AssistantStreamEvent::ThreadRunRequiresAction
-
}
-
-
# Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) is completed.
-
1
variant :"thread.run.completed", -> { OpenAI::Beta::AssistantStreamEvent::ThreadRunCompleted }
-
-
# Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) ends with status `incomplete`.
-
1
variant :"thread.run.incomplete", -> { OpenAI::Beta::AssistantStreamEvent::ThreadRunIncomplete }
-
-
# Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) fails.
-
1
variant :"thread.run.failed", -> { OpenAI::Beta::AssistantStreamEvent::ThreadRunFailed }
-
-
# Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to a `cancelling` status.
-
1
variant :"thread.run.cancelling", -> { OpenAI::Beta::AssistantStreamEvent::ThreadRunCancelling }
-
-
# Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) is cancelled.
-
1
variant :"thread.run.cancelled", -> { OpenAI::Beta::AssistantStreamEvent::ThreadRunCancelled }
-
-
# Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) expires.
-
1
variant :"thread.run.expired", -> { OpenAI::Beta::AssistantStreamEvent::ThreadRunExpired }
-
-
# Occurs when a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) is created.
-
1
variant :"thread.run.step.created", -> { OpenAI::Beta::AssistantStreamEvent::ThreadRunStepCreated }
-
-
# Occurs when a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) moves to an `in_progress` state.
-
1
variant :"thread.run.step.in_progress",
-
-> {
-
OpenAI::Beta::AssistantStreamEvent::ThreadRunStepInProgress
-
}
-
-
# Occurs when parts of a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) are being streamed.
-
1
variant :"thread.run.step.delta", -> { OpenAI::Beta::AssistantStreamEvent::ThreadRunStepDelta }
-
-
# Occurs when a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) is completed.
-
1
variant :"thread.run.step.completed",
-
-> {
-
OpenAI::Beta::AssistantStreamEvent::ThreadRunStepCompleted
-
}
-
-
# Occurs when a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) fails.
-
1
variant :"thread.run.step.failed", -> { OpenAI::Beta::AssistantStreamEvent::ThreadRunStepFailed }
-
-
# Occurs when a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) is cancelled.
-
1
variant :"thread.run.step.cancelled",
-
-> {
-
OpenAI::Beta::AssistantStreamEvent::ThreadRunStepCancelled
-
}
-
-
# Occurs when a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) expires.
-
1
variant :"thread.run.step.expired", -> { OpenAI::Beta::AssistantStreamEvent::ThreadRunStepExpired }
-
-
# Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) is created.
-
1
variant :"thread.message.created", -> { OpenAI::Beta::AssistantStreamEvent::ThreadMessageCreated }
-
-
# Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) moves to an `in_progress` state.
-
1
variant :"thread.message.in_progress",
-
-> {
-
OpenAI::Beta::AssistantStreamEvent::ThreadMessageInProgress
-
}
-
-
# Occurs when parts of a [Message](https://platform.openai.com/docs/api-reference/messages/object) are being streamed.
-
1
variant :"thread.message.delta", -> { OpenAI::Beta::AssistantStreamEvent::ThreadMessageDelta }
-
-
# Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) is completed.
-
1
variant :"thread.message.completed", -> { OpenAI::Beta::AssistantStreamEvent::ThreadMessageCompleted }
-
-
# Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) ends before it is completed.
-
1
variant :"thread.message.incomplete",
-
-> {
-
OpenAI::Beta::AssistantStreamEvent::ThreadMessageIncomplete
-
}
-
-
# Occurs when an [error](https://platform.openai.com/docs/guides/error-codes#api-errors) occurs. This can happen due to an internal server error or a timeout.
-
1
variant :error, -> { OpenAI::Beta::AssistantStreamEvent::ErrorEvent }
-
-
1
class ThreadCreated < OpenAI::Internal::Type::BaseModel
-
# @!attribute data
-
# Represents a thread that contains
-
# [messages](https://platform.openai.com/docs/api-reference/messages).
-
#
-
# @return [OpenAI::Models::Beta::Thread]
-
1
required :data, -> { OpenAI::Beta::Thread }
-
-
# @!attribute event
-
#
-
# @return [Symbol, :"thread.created"]
-
1
required :event, const: :"thread.created"
-
-
# @!attribute enabled
-
# Whether to enable input audio transcription.
-
#
-
# @return [Boolean, nil]
-
1
optional :enabled, OpenAI::Internal::Type::Boolean
-
-
# @!method initialize(data:, enabled: nil, event: :"thread.created")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::AssistantStreamEvent::ThreadCreated} for more details.
-
#
-
# Occurs when a new
-
# [thread](https://platform.openai.com/docs/api-reference/threads/object) is
-
# created.
-
#
-
# @param data [OpenAI::Models::Beta::Thread] Represents a thread that contains [messages](https://platform.openai.com/docs/ap
-
#
-
# @param enabled [Boolean] Whether to enable input audio transcription.
-
#
-
# @param event [Symbol, :"thread.created"]
-
end
-
-
1
class ThreadRunCreated < OpenAI::Internal::Type::BaseModel
-
# @!attribute data
-
# Represents an execution run on a
-
# [thread](https://platform.openai.com/docs/api-reference/threads).
-
#
-
# @return [OpenAI::Models::Beta::Threads::Run]
-
1
required :data, -> { OpenAI::Beta::Threads::Run }
-
-
# @!attribute event
-
#
-
# @return [Symbol, :"thread.run.created"]
-
1
required :event, const: :"thread.run.created"
-
-
# @!method initialize(data:, event: :"thread.run.created")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCreated} for more details.
-
#
-
# Occurs when a new
-
# [run](https://platform.openai.com/docs/api-reference/runs/object) is created.
-
#
-
# @param data [OpenAI::Models::Beta::Threads::Run] Represents an execution run on a [thread](https://platform.openai.com/docs/api-r
-
#
-
# @param event [Symbol, :"thread.run.created"]
-
end
-
-
1
class ThreadRunQueued < OpenAI::Internal::Type::BaseModel
-
# @!attribute data
-
# Represents an execution run on a
-
# [thread](https://platform.openai.com/docs/api-reference/threads).
-
#
-
# @return [OpenAI::Models::Beta::Threads::Run]
-
1
required :data, -> { OpenAI::Beta::Threads::Run }
-
-
# @!attribute event
-
#
-
# @return [Symbol, :"thread.run.queued"]
-
1
required :event, const: :"thread.run.queued"
-
-
# @!method initialize(data:, event: :"thread.run.queued")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunQueued} for more details.
-
#
-
# Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
-
# moves to a `queued` status.
-
#
-
# @param data [OpenAI::Models::Beta::Threads::Run] Represents an execution run on a [thread](https://platform.openai.com/docs/api-r
-
#
-
# @param event [Symbol, :"thread.run.queued"]
-
end
-
-
1
class ThreadRunInProgress < OpenAI::Internal::Type::BaseModel
-
# @!attribute data
-
# Represents an execution run on a
-
# [thread](https://platform.openai.com/docs/api-reference/threads).
-
#
-
# @return [OpenAI::Models::Beta::Threads::Run]
-
1
required :data, -> { OpenAI::Beta::Threads::Run }
-
-
# @!attribute event
-
#
-
# @return [Symbol, :"thread.run.in_progress"]
-
1
required :event, const: :"thread.run.in_progress"
-
-
# @!method initialize(data:, event: :"thread.run.in_progress")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunInProgress} for more
-
# details.
-
#
-
# Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
-
# moves to an `in_progress` status.
-
#
-
# @param data [OpenAI::Models::Beta::Threads::Run] Represents an execution run on a [thread](https://platform.openai.com/docs/api-r
-
#
-
# @param event [Symbol, :"thread.run.in_progress"]
-
end
-
-
1
class ThreadRunRequiresAction < OpenAI::Internal::Type::BaseModel
-
# @!attribute data
-
# Represents an execution run on a
-
# [thread](https://platform.openai.com/docs/api-reference/threads).
-
#
-
# @return [OpenAI::Models::Beta::Threads::Run]
-
1
required :data, -> { OpenAI::Beta::Threads::Run }
-
-
# @!attribute event
-
#
-
# @return [Symbol, :"thread.run.requires_action"]
-
1
required :event, const: :"thread.run.requires_action"
-
-
# @!method initialize(data:, event: :"thread.run.requires_action")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunRequiresAction} for more
-
# details.
-
#
-
# Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
-
# moves to a `requires_action` status.
-
#
-
# @param data [OpenAI::Models::Beta::Threads::Run] Represents an execution run on a [thread](https://platform.openai.com/docs/api-r
-
#
-
# @param event [Symbol, :"thread.run.requires_action"]
-
end
-
-
1
class ThreadRunCompleted < OpenAI::Internal::Type::BaseModel
-
# @!attribute data
-
# Represents an execution run on a
-
# [thread](https://platform.openai.com/docs/api-reference/threads).
-
#
-
# @return [OpenAI::Models::Beta::Threads::Run]
-
1
required :data, -> { OpenAI::Beta::Threads::Run }
-
-
# @!attribute event
-
#
-
# @return [Symbol, :"thread.run.completed"]
-
1
required :event, const: :"thread.run.completed"
-
-
# @!method initialize(data:, event: :"thread.run.completed")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCompleted} for more
-
# details.
-
#
-
# Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
-
# is completed.
-
#
-
# @param data [OpenAI::Models::Beta::Threads::Run] Represents an execution run on a [thread](https://platform.openai.com/docs/api-r
-
#
-
# @param event [Symbol, :"thread.run.completed"]
-
end
-
-
1
class ThreadRunIncomplete < OpenAI::Internal::Type::BaseModel
-
# @!attribute data
-
# Represents an execution run on a
-
# [thread](https://platform.openai.com/docs/api-reference/threads).
-
#
-
# @return [OpenAI::Models::Beta::Threads::Run]
-
1
required :data, -> { OpenAI::Beta::Threads::Run }
-
-
# @!attribute event
-
#
-
# @return [Symbol, :"thread.run.incomplete"]
-
1
required :event, const: :"thread.run.incomplete"
-
-
# @!method initialize(data:, event: :"thread.run.incomplete")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunIncomplete} for more
-
# details.
-
#
-
# Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
-
# ends with status `incomplete`.
-
#
-
# @param data [OpenAI::Models::Beta::Threads::Run] Represents an execution run on a [thread](https://platform.openai.com/docs/api-r
-
#
-
# @param event [Symbol, :"thread.run.incomplete"]
-
end
-
-
1
class ThreadRunFailed < OpenAI::Internal::Type::BaseModel
-
# @!attribute data
-
# Represents an execution run on a
-
# [thread](https://platform.openai.com/docs/api-reference/threads).
-
#
-
# @return [OpenAI::Models::Beta::Threads::Run]
-
1
required :data, -> { OpenAI::Beta::Threads::Run }
-
-
# @!attribute event
-
#
-
# @return [Symbol, :"thread.run.failed"]
-
1
required :event, const: :"thread.run.failed"
-
-
# @!method initialize(data:, event: :"thread.run.failed")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunFailed} for more details.
-
#
-
# Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
-
# fails.
-
#
-
# @param data [OpenAI::Models::Beta::Threads::Run] Represents an execution run on a [thread](https://platform.openai.com/docs/api-r
-
#
-
# @param event [Symbol, :"thread.run.failed"]
-
end
-
-
1
class ThreadRunCancelling < OpenAI::Internal::Type::BaseModel
-
# @!attribute data
-
# Represents an execution run on a
-
# [thread](https://platform.openai.com/docs/api-reference/threads).
-
#
-
# @return [OpenAI::Models::Beta::Threads::Run]
-
1
required :data, -> { OpenAI::Beta::Threads::Run }
-
-
# @!attribute event
-
#
-
# @return [Symbol, :"thread.run.cancelling"]
-
1
required :event, const: :"thread.run.cancelling"
-
-
# @!method initialize(data:, event: :"thread.run.cancelling")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCancelling} for more
-
# details.
-
#
-
# Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
-
# moves to a `cancelling` status.
-
#
-
# @param data [OpenAI::Models::Beta::Threads::Run] Represents an execution run on a [thread](https://platform.openai.com/docs/api-r
-
#
-
# @param event [Symbol, :"thread.run.cancelling"]
-
end
-
-
1
class ThreadRunCancelled < OpenAI::Internal::Type::BaseModel
-
# @!attribute data
-
# Represents an execution run on a
-
# [thread](https://platform.openai.com/docs/api-reference/threads).
-
#
-
# @return [OpenAI::Models::Beta::Threads::Run]
-
1
required :data, -> { OpenAI::Beta::Threads::Run }
-
-
# @!attribute event
-
#
-
# @return [Symbol, :"thread.run.cancelled"]
-
1
required :event, const: :"thread.run.cancelled"
-
-
# @!method initialize(data:, event: :"thread.run.cancelled")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCancelled} for more
-
# details.
-
#
-
# Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
-
# is cancelled.
-
#
-
# @param data [OpenAI::Models::Beta::Threads::Run] Represents an execution run on a [thread](https://platform.openai.com/docs/api-r
-
#
-
# @param event [Symbol, :"thread.run.cancelled"]
-
end
-
-
1
class ThreadRunExpired < OpenAI::Internal::Type::BaseModel
  # @!attribute data
  # Represents an execution run on a
  # [thread](https://platform.openai.com/docs/api-reference/threads).
  #
  # @return [OpenAI::Models::Beta::Threads::Run]
  required :data, -> { OpenAI::Beta::Threads::Run }

  # @!attribute event
  # Constant discriminator identifying this stream event variant.
  #
  # @return [Symbol, :"thread.run.expired"]
  required :event, const: :"thread.run.expired"

  # @!method initialize(data:, event: :"thread.run.expired")
  # Some parameter documentation has been truncated, see
  # {OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunExpired} for more details.
  #
  # Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
  # expires.
  #
  # @param data [OpenAI::Models::Beta::Threads::Run] Represents an execution run on a [thread](https://platform.openai.com/docs/api-r
  #
  # @param event [Symbol, :"thread.run.expired"]
end
-
-
1
class ThreadRunStepCreated < OpenAI::Internal::Type::BaseModel
  # @!attribute data
  # Represents a step in execution of a run.
  #
  # @return [OpenAI::Models::Beta::Threads::Runs::RunStep]
  required :data, -> { OpenAI::Beta::Threads::Runs::RunStep }

  # @!attribute event
  # Constant discriminator identifying this stream event variant.
  #
  # @return [Symbol, :"thread.run.step.created"]
  required :event, const: :"thread.run.step.created"

  # @!method initialize(data:, event: :"thread.run.step.created")
  # Some parameter documentation has been truncated, see
  # {OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCreated} for more
  # details.
  #
  # Occurs when a
  # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)
  # is created.
  #
  # @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] Represents a step in execution of a run.
  #
  # @param event [Symbol, :"thread.run.step.created"]
end
-
-
1
class ThreadRunStepInProgress < OpenAI::Internal::Type::BaseModel
  # @!attribute data
  # Represents a step in execution of a run.
  #
  # @return [OpenAI::Models::Beta::Threads::Runs::RunStep]
  required :data, -> { OpenAI::Beta::Threads::Runs::RunStep }

  # @!attribute event
  # Constant discriminator identifying this stream event variant.
  #
  # @return [Symbol, :"thread.run.step.in_progress"]
  required :event, const: :"thread.run.step.in_progress"

  # @!method initialize(data:, event: :"thread.run.step.in_progress")
  # Some parameter documentation has been truncated, see
  # {OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepInProgress} for more
  # details.
  #
  # Occurs when a
  # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)
  # moves to an `in_progress` state.
  #
  # @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] Represents a step in execution of a run.
  #
  # @param event [Symbol, :"thread.run.step.in_progress"]
end
-
-
1
class ThreadRunStepDelta < OpenAI::Internal::Type::BaseModel
  # @!attribute data
  # Represents a run step delta i.e. any changed fields on a run step during
  # streaming.
  #
  # @return [OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent]
  required :data, -> { OpenAI::Beta::Threads::Runs::RunStepDeltaEvent }

  # @!attribute event
  # Constant discriminator identifying this stream event variant.
  #
  # @return [Symbol, :"thread.run.step.delta"]
  required :event, const: :"thread.run.step.delta"

  # @!method initialize(data:, event: :"thread.run.step.delta")
  # Some parameter documentation has been truncated, see
  # {OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepDelta} for more
  # details.
  #
  # Occurs when parts of a
  # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)
  # are being streamed.
  #
  # @param data [OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent] Represents a run step delta i.e. any changed fields on a run step during streami
  #
  # @param event [Symbol, :"thread.run.step.delta"]
end
-
-
1
class ThreadRunStepCompleted < OpenAI::Internal::Type::BaseModel
  # @!attribute data
  # Represents a step in execution of a run.
  #
  # @return [OpenAI::Models::Beta::Threads::Runs::RunStep]
  required :data, -> { OpenAI::Beta::Threads::Runs::RunStep }

  # @!attribute event
  # Constant discriminator identifying this stream event variant.
  #
  # @return [Symbol, :"thread.run.step.completed"]
  required :event, const: :"thread.run.step.completed"

  # @!method initialize(data:, event: :"thread.run.step.completed")
  # Some parameter documentation has been truncated, see
  # {OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCompleted} for more
  # details.
  #
  # Occurs when a
  # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)
  # is completed.
  #
  # @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] Represents a step in execution of a run.
  #
  # @param event [Symbol, :"thread.run.step.completed"]
end
-
-
1
class ThreadRunStepFailed < OpenAI::Internal::Type::BaseModel
  # @!attribute data
  # Represents a step in execution of a run.
  #
  # @return [OpenAI::Models::Beta::Threads::Runs::RunStep]
  required :data, -> { OpenAI::Beta::Threads::Runs::RunStep }

  # @!attribute event
  # Constant discriminator identifying this stream event variant.
  #
  # @return [Symbol, :"thread.run.step.failed"]
  required :event, const: :"thread.run.step.failed"

  # @!method initialize(data:, event: :"thread.run.step.failed")
  # Some parameter documentation has been truncated, see
  # {OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepFailed} for more
  # details.
  #
  # Occurs when a
  # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)
  # fails.
  #
  # @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] Represents a step in execution of a run.
  #
  # @param event [Symbol, :"thread.run.step.failed"]
end
-
-
1
class ThreadRunStepCancelled < OpenAI::Internal::Type::BaseModel
  # @!attribute data
  # Represents a step in execution of a run.
  #
  # @return [OpenAI::Models::Beta::Threads::Runs::RunStep]
  required :data, -> { OpenAI::Beta::Threads::Runs::RunStep }

  # @!attribute event
  # Constant discriminator identifying this stream event variant.
  #
  # @return [Symbol, :"thread.run.step.cancelled"]
  required :event, const: :"thread.run.step.cancelled"

  # @!method initialize(data:, event: :"thread.run.step.cancelled")
  # Some parameter documentation has been truncated, see
  # {OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCancelled} for more
  # details.
  #
  # Occurs when a
  # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)
  # is cancelled.
  #
  # @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] Represents a step in execution of a run.
  #
  # @param event [Symbol, :"thread.run.step.cancelled"]
end
-
-
1
class ThreadRunStepExpired < OpenAI::Internal::Type::BaseModel
  # @!attribute data
  # Represents a step in execution of a run.
  #
  # @return [OpenAI::Models::Beta::Threads::Runs::RunStep]
  required :data, -> { OpenAI::Beta::Threads::Runs::RunStep }

  # @!attribute event
  # Constant discriminator identifying this stream event variant.
  #
  # @return [Symbol, :"thread.run.step.expired"]
  required :event, const: :"thread.run.step.expired"

  # @!method initialize(data:, event: :"thread.run.step.expired")
  # Some parameter documentation has been truncated, see
  # {OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepExpired} for more
  # details.
  #
  # Occurs when a
  # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)
  # expires.
  #
  # @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] Represents a step in execution of a run.
  #
  # @param event [Symbol, :"thread.run.step.expired"]
end
-
-
1
class ThreadMessageCreated < OpenAI::Internal::Type::BaseModel
  # @!attribute data
  # Represents a message within a
  # [thread](https://platform.openai.com/docs/api-reference/threads).
  #
  # @return [OpenAI::Models::Beta::Threads::Message]
  required :data, -> { OpenAI::Beta::Threads::Message }

  # @!attribute event
  # Constant discriminator identifying this stream event variant.
  #
  # @return [Symbol, :"thread.message.created"]
  required :event, const: :"thread.message.created"

  # @!method initialize(data:, event: :"thread.message.created")
  # Some parameter documentation has been truncated, see
  # {OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageCreated} for more
  # details.
  #
  # Occurs when a
  # [message](https://platform.openai.com/docs/api-reference/messages/object) is
  # created.
  #
  # @param data [OpenAI::Models::Beta::Threads::Message] Represents a message within a [thread](https://platform.openai.com/docs/api-refe
  #
  # @param event [Symbol, :"thread.message.created"]
end
-
-
1
class ThreadMessageInProgress < OpenAI::Internal::Type::BaseModel
  # @!attribute data
  # Represents a message within a
  # [thread](https://platform.openai.com/docs/api-reference/threads).
  #
  # @return [OpenAI::Models::Beta::Threads::Message]
  required :data, -> { OpenAI::Beta::Threads::Message }

  # @!attribute event
  # Constant discriminator identifying this stream event variant.
  #
  # @return [Symbol, :"thread.message.in_progress"]
  required :event, const: :"thread.message.in_progress"

  # @!method initialize(data:, event: :"thread.message.in_progress")
  # Some parameter documentation has been truncated, see
  # {OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageInProgress} for more
  # details.
  #
  # Occurs when a
  # [message](https://platform.openai.com/docs/api-reference/messages/object) moves
  # to an `in_progress` state.
  #
  # @param data [OpenAI::Models::Beta::Threads::Message] Represents a message within a [thread](https://platform.openai.com/docs/api-refe
  #
  # @param event [Symbol, :"thread.message.in_progress"]
end
-
-
1
class ThreadMessageDelta < OpenAI::Internal::Type::BaseModel
  # @!attribute data
  # Represents a message delta i.e. any changed fields on a message during
  # streaming.
  #
  # @return [OpenAI::Models::Beta::Threads::MessageDeltaEvent]
  required :data, -> { OpenAI::Beta::Threads::MessageDeltaEvent }

  # @!attribute event
  # Constant discriminator identifying this stream event variant.
  #
  # @return [Symbol, :"thread.message.delta"]
  required :event, const: :"thread.message.delta"

  # @!method initialize(data:, event: :"thread.message.delta")
  # Some parameter documentation has been truncated, see
  # {OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageDelta} for more
  # details.
  #
  # Occurs when parts of a
  # [Message](https://platform.openai.com/docs/api-reference/messages/object) are
  # being streamed.
  #
  # @param data [OpenAI::Models::Beta::Threads::MessageDeltaEvent] Represents a message delta i.e. any changed fields on a message during streaming
  #
  # @param event [Symbol, :"thread.message.delta"]
end
-
-
1
class ThreadMessageCompleted < OpenAI::Internal::Type::BaseModel
  # @!attribute data
  # Represents a message within a
  # [thread](https://platform.openai.com/docs/api-reference/threads).
  #
  # @return [OpenAI::Models::Beta::Threads::Message]
  required :data, -> { OpenAI::Beta::Threads::Message }

  # @!attribute event
  # Constant discriminator identifying this stream event variant.
  #
  # @return [Symbol, :"thread.message.completed"]
  required :event, const: :"thread.message.completed"

  # @!method initialize(data:, event: :"thread.message.completed")
  # Some parameter documentation has been truncated, see
  # {OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageCompleted} for more
  # details.
  #
  # Occurs when a
  # [message](https://platform.openai.com/docs/api-reference/messages/object) is
  # completed.
  #
  # @param data [OpenAI::Models::Beta::Threads::Message] Represents a message within a [thread](https://platform.openai.com/docs/api-refe
  #
  # @param event [Symbol, :"thread.message.completed"]
end
-
-
1
class ThreadMessageIncomplete < OpenAI::Internal::Type::BaseModel
  # @!attribute data
  # Represents a message within a
  # [thread](https://platform.openai.com/docs/api-reference/threads).
  #
  # @return [OpenAI::Models::Beta::Threads::Message]
  required :data, -> { OpenAI::Beta::Threads::Message }

  # @!attribute event
  # Constant discriminator identifying this stream event variant.
  #
  # @return [Symbol, :"thread.message.incomplete"]
  required :event, const: :"thread.message.incomplete"

  # @!method initialize(data:, event: :"thread.message.incomplete")
  # Some parameter documentation has been truncated, see
  # {OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageIncomplete} for more
  # details.
  #
  # Occurs when a
  # [message](https://platform.openai.com/docs/api-reference/messages/object) ends
  # before it is completed.
  #
  # @param data [OpenAI::Models::Beta::Threads::Message] Represents a message within a [thread](https://platform.openai.com/docs/api-refe
  #
  # @param event [Symbol, :"thread.message.incomplete"]
end
-
-
1
class ErrorEvent < OpenAI::Internal::Type::BaseModel
  # @!attribute data
  # The error payload delivered with the event.
  #
  # @return [OpenAI::Models::ErrorObject]
  required :data, -> { OpenAI::ErrorObject }

  # @!attribute event
  # Constant discriminator identifying this stream event variant.
  #
  # @return [Symbol, :error]
  required :event, const: :error

  # @!method initialize(data:, event: :error)
  # Occurs when an
  # [error](https://platform.openai.com/docs/guides/error-codes#api-errors) occurs.
  # This can happen due to an internal server error or a timeout.
  #
  # @param data [OpenAI::Models::ErrorObject]
  # @param event [Symbol, :error]
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Beta::AssistantStreamEvent::ThreadCreated, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCreated, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunQueued, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunInProgress, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunRequiresAction, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCompleted, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunIncomplete, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunFailed, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCancelling, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCancelled, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunExpired, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCreated, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepInProgress, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepDelta, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCompleted, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepFailed, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCancelled, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepExpired, OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageCreated, OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageInProgress, OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageDelta, OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageCompleted, OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageIncomplete, OpenAI::Models::Beta::AssistantStreamEvent::ErrorEvent)]
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Beta
      # Union of the tool types that can be enabled on an assistant:
      # code interpreter, file search, or a user-defined function.
      module AssistantTool
        extend OpenAI::Internal::Type::Union

        # Variants are resolved by the `type` field of the payload.
        discriminator :type

        variant :code_interpreter, -> { OpenAI::Beta::CodeInterpreterTool }

        variant :file_search, -> { OpenAI::Beta::FileSearchTool }

        variant :function, -> { OpenAI::Beta::FunctionTool }

        # @!method self.variants
        # @return [Array(OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::FileSearchTool, OpenAI::Models::Beta::FunctionTool)]
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Beta
      class AssistantToolChoice < OpenAI::Internal::Type::BaseModel
        # @!attribute type
        # The type of the tool. If type is `function`, the function name must be set
        #
        # @return [Symbol, OpenAI::Models::Beta::AssistantToolChoice::Type]
        required :type, enum: -> { OpenAI::Beta::AssistantToolChoice::Type }

        # @!attribute function
        # The function to force the model to call; only meaningful when `type`
        # is `:function`.
        #
        # @return [OpenAI::Models::Beta::AssistantToolChoiceFunction, nil]
        optional :function, -> { OpenAI::Beta::AssistantToolChoiceFunction }

        # @!method initialize(type:, function: nil)
        # Specifies a tool the model should use. Use to force the model to call a specific
        # tool.
        #
        # @param type [Symbol, OpenAI::Models::Beta::AssistantToolChoice::Type] The type of the tool. If type is `function`, the function name must be set
        #
        # @param function [OpenAI::Models::Beta::AssistantToolChoiceFunction]

        # The type of the tool. If type is `function`, the function name must be set
        #
        # @see OpenAI::Models::Beta::AssistantToolChoice#type
        module Type
          extend OpenAI::Internal::Type::Enum

          FUNCTION = :function
          CODE_INTERPRETER = :code_interpreter
          FILE_SEARCH = :file_search

          # @!method self.values
          # @return [Array<Symbol>]
        end
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Beta
      # Identifies a specific function for a forced tool choice.
      class AssistantToolChoiceFunction < OpenAI::Internal::Type::BaseModel
        # @!attribute name
        # The name of the function to call.
        #
        # @return [String]
        required :name, String

        # @!method initialize(name:)
        # @param name [String] The name of the function to call.
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Beta
      # Controls which (if any) tool is called by the model. `none` means the model will
      # not call any tools and instead generates a message. `auto` is the default value
      # and means the model can pick between generating a message or calling one or more
      # tools. `required` means the model must call one or more tools before responding
      # to the user. Specifying a particular tool like `{"type": "file_search"}` or
      # `{"type": "function", "function": {"name": "my_function"}}` forces the model to
      # call that tool.
      module AssistantToolChoiceOption
        extend OpenAI::Internal::Type::Union

        # `none` means the model will not call any tools and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools before responding to the user.
        variant enum: -> { OpenAI::Beta::AssistantToolChoiceOption::Auto }

        # Specifies a tool the model should use. Use to force the model to call a specific tool.
        variant -> { OpenAI::Beta::AssistantToolChoice }

        # `none` means the model will not call any tools and instead generates a message.
        # `auto` means the model can pick between generating a message or calling one or
        # more tools. `required` means the model must call one or more tools before
        # responding to the user.
        module Auto
          extend OpenAI::Internal::Type::Enum

          NONE = :none
          AUTO = :auto
          REQUIRED = :required

          # @!method self.values
          # @return [Array<Symbol>]
        end

        # @!method self.variants
        # @return [Array(Symbol, OpenAI::Models::Beta::AssistantToolChoiceOption::Auto, OpenAI::Models::Beta::AssistantToolChoice)]
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
# @see OpenAI::Resources::Beta::Assistants#update
-
1
class AssistantUpdateParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!attribute description
-
# The description of the assistant. The maximum length is 512 characters.
-
#
-
# @return [String, nil]
-
1
optional :description, String, nil?: true
-
-
# @!attribute instructions
-
# The system instructions that the assistant uses. The maximum length is 256,000
-
# characters.
-
#
-
# @return [String, nil]
-
1
optional :instructions, String, nil?: true
-
-
# @!attribute metadata
-
# Set of 16 key-value pairs that can be attached to an object. This can be useful
-
# for storing additional information about the object in a structured format, and
-
# querying for objects via API or the dashboard.
-
#
-
# Keys are strings with a maximum length of 64 characters. Values are strings with
-
# a maximum length of 512 characters.
-
#
-
# @return [Hash{Symbol=>String}, nil]
-
1
optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
-
-
# @!attribute model
-
# ID of the model to use. You can use the
-
# [List models](https://platform.openai.com/docs/api-reference/models/list) API to
-
# see all of your available models, or see our
-
# [Model overview](https://platform.openai.com/docs/models) for descriptions of
-
# them.
-
#
-
# @return [String, Symbol, OpenAI::Models::Beta::AssistantUpdateParams::Model, nil]
-
1
optional :model, union: -> { OpenAI::Beta::AssistantUpdateParams::Model }
-
-
# @!attribute name
-
# The name of the assistant. The maximum length is 256 characters.
-
#
-
# @return [String, nil]
-
1
optional :name, String, nil?: true
-
-
# @!attribute reasoning_effort
-
# **o-series models only**
-
#
-
# Constrains effort on reasoning for
-
# [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-
# supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
-
# result in faster responses and fewer tokens used on reasoning in a response.
-
#
-
# @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
-
1
optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
-
-
# @!attribute response_format
-
# Specifies the format that the model must output. Compatible with
-
# [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
-
# [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
-
# and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
-
#
-
# Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
-
# Outputs which ensures the model will match your supplied JSON schema. Learn more
-
# in the
-
# [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
-
#
-
# Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
-
# message the model generates is valid JSON.
-
#
-
# **Important:** when using JSON mode, you **must** also instruct the model to
-
# produce JSON yourself via a system or user message. Without this, the model may
-
# generate an unending stream of whitespace until the generation reaches the token
-
# limit, resulting in a long-running and seemingly "stuck" request. Also note that
-
# the message content may be partially cut off if `finish_reason="length"`, which
-
# indicates the generation exceeded `max_tokens` or the conversation exceeded the
-
# max context length.
-
#
-
# @return [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil]
-
1
optional :response_format, union: -> { OpenAI::Beta::AssistantResponseFormatOption }, nil?: true
-
-
# @!attribute temperature
-
# What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
-
# make the output more random, while lower values like 0.2 will make it more
-
# focused and deterministic.
-
#
-
# @return [Float, nil]
-
1
optional :temperature, Float, nil?: true
-
-
# @!attribute tool_resources
-
# A set of resources that are used by the assistant's tools. The resources are
-
# specific to the type of tool. For example, the `code_interpreter` tool requires
-
# a list of file IDs, while the `file_search` tool requires a list of vector store
-
# IDs.
-
#
-
# @return [OpenAI::Models::Beta::AssistantUpdateParams::ToolResources, nil]
-
1
optional :tool_resources, -> { OpenAI::Beta::AssistantUpdateParams::ToolResources }, nil?: true
-
-
# @!attribute tools
-
# A list of tool enabled on the assistant. There can be a maximum of 128 tools per
-
# assistant. Tools can be of types `code_interpreter`, `file_search`, or
-
# `function`.
-
#
-
# @return [Array<OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::FileSearchTool, OpenAI::Models::Beta::FunctionTool>, nil]
-
1
optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::AssistantTool] }
-
-
# @!attribute top_p
-
# An alternative to sampling with temperature, called nucleus sampling, where the
-
# model considers the results of the tokens with top_p probability mass. So 0.1
-
# means only the tokens comprising the top 10% probability mass are considered.
-
#
-
# We generally recommend altering this or temperature but not both.
-
#
-
# @return [Float, nil]
-
1
optional :top_p, Float, nil?: true
-
-
# @!method initialize(description: nil, instructions: nil, metadata: nil, model: nil, name: nil, reasoning_effort: nil, response_format: nil, temperature: nil, tool_resources: nil, tools: nil, top_p: nil, request_options: {})
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::AssistantUpdateParams} for more details.
-
#
-
# @param description [String, nil] The description of the assistant. The maximum length is 512 characters.
-
#
-
# @param instructions [String, nil] The system instructions that the assistant uses. The maximum length is 256,000 c
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
#
-
# @param model [String, Symbol, OpenAI::Models::Beta::AssistantUpdateParams::Model] ID of the model to use. You can use the [List models](https://platform.openai.co
-
#
-
# @param name [String, nil] The name of the assistant. The maximum length is 256 characters.
-
#
-
# @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] **o-series models only**
-
#
-
# @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] Specifies the format that the model must output. Compatible with [GPT-4o](https:
-
#
-
# @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
-
#
-
# @param tool_resources [OpenAI::Models::Beta::AssistantUpdateParams::ToolResources, nil] A set of resources that are used by the assistant's tools. The resources are spe
-
#
-
# @param tools [Array<OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::FileSearchTool, OpenAI::Models::Beta::FunctionTool>] A list of tool enabled on the assistant. There can be a maximum of 128 tools per
-
#
-
# @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling, where the
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
-
# ID of the model to use. You can use the
# [List models](https://platform.openai.com/docs/api-reference/models/list) API to
# see all of your available models, or see our
# [Model overview](https://platform.openai.com/docs/models) for descriptions of
# them.
module Model
  extend OpenAI::Internal::Type::Union

  # Any model identifier string is accepted; the constants below are
  # well-known values offered for convenience.
  variant String

  variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_1 }

  variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_1_MINI }

  variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_1_NANO }

  variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_1_2025_04_14 }

  variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_1_MINI_2025_04_14 }

  variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_1_NANO_2025_04_14 }

  variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::O3_MINI }

  variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::O3_MINI_2025_01_31 }

  variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::O1 }

  variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::O1_2024_12_17 }

  variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4O }

  variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4O_2024_11_20 }

  variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4O_2024_08_06 }

  variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4O_2024_05_13 }

  variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4O_MINI }

  variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4O_MINI_2024_07_18 }

  variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_5_PREVIEW }

  variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_5_PREVIEW_2025_02_27 }

  variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_TURBO }

  variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_TURBO_2024_04_09 }

  variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_0125_PREVIEW }

  variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_TURBO_PREVIEW }

  variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_1106_PREVIEW }

  variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_VISION_PREVIEW }

  variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4 }

  variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_0314 }

  variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_0613 }

  variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_32K }

  variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_32K_0314 }

  variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_4_32K_0613 }

  variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_3_5_TURBO }

  variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_3_5_TURBO_16K }

  variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_3_5_TURBO_0613 }

  variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_3_5_TURBO_1106 }

  variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_3_5_TURBO_0125 }

  variant const: -> { OpenAI::Models::Beta::AssistantUpdateParams::Model::GPT_3_5_TURBO_16K_0613 }

  # @!method self.variants
  # @return [Array(String, Symbol)]

  define_sorbet_constant!(:Variants) do
    T.type_alias { T.any(String, OpenAI::Beta::AssistantUpdateParams::Model::TaggedSymbol) }
  end

  # @!group

  GPT_4_1 = :"gpt-4.1"
  GPT_4_1_MINI = :"gpt-4.1-mini"
  GPT_4_1_NANO = :"gpt-4.1-nano"
  GPT_4_1_2025_04_14 = :"gpt-4.1-2025-04-14"
  GPT_4_1_MINI_2025_04_14 = :"gpt-4.1-mini-2025-04-14"
  GPT_4_1_NANO_2025_04_14 = :"gpt-4.1-nano-2025-04-14"
  O3_MINI = :"o3-mini"
  O3_MINI_2025_01_31 = :"o3-mini-2025-01-31"
  O1 = :o1
  O1_2024_12_17 = :"o1-2024-12-17"
  GPT_4O = :"gpt-4o"
  GPT_4O_2024_11_20 = :"gpt-4o-2024-11-20"
  GPT_4O_2024_08_06 = :"gpt-4o-2024-08-06"
  GPT_4O_2024_05_13 = :"gpt-4o-2024-05-13"
  GPT_4O_MINI = :"gpt-4o-mini"
  GPT_4O_MINI_2024_07_18 = :"gpt-4o-mini-2024-07-18"
  GPT_4_5_PREVIEW = :"gpt-4.5-preview"
  GPT_4_5_PREVIEW_2025_02_27 = :"gpt-4.5-preview-2025-02-27"
  GPT_4_TURBO = :"gpt-4-turbo"
  GPT_4_TURBO_2024_04_09 = :"gpt-4-turbo-2024-04-09"
  GPT_4_0125_PREVIEW = :"gpt-4-0125-preview"
  GPT_4_TURBO_PREVIEW = :"gpt-4-turbo-preview"
  GPT_4_1106_PREVIEW = :"gpt-4-1106-preview"
  GPT_4_VISION_PREVIEW = :"gpt-4-vision-preview"
  GPT_4 = :"gpt-4"
  GPT_4_0314 = :"gpt-4-0314"
  GPT_4_0613 = :"gpt-4-0613"
  GPT_4_32K = :"gpt-4-32k"
  GPT_4_32K_0314 = :"gpt-4-32k-0314"
  GPT_4_32K_0613 = :"gpt-4-32k-0613"
  GPT_3_5_TURBO = :"gpt-3.5-turbo"
  GPT_3_5_TURBO_16K = :"gpt-3.5-turbo-16k"
  GPT_3_5_TURBO_0613 = :"gpt-3.5-turbo-0613"
  GPT_3_5_TURBO_1106 = :"gpt-3.5-turbo-1106"
  GPT_3_5_TURBO_0125 = :"gpt-3.5-turbo-0125"
  GPT_3_5_TURBO_16K_0613 = :"gpt-3.5-turbo-16k-0613"

  # @!endgroup
end
-
-
1
class ToolResources < OpenAI::Internal::Type::BaseModel
  # (Reconstructed: coverage-report artifacts had been interleaved with the code.)

  # @!attribute code_interpreter
  #
  #   @return [OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::CodeInterpreter, nil]
  optional :code_interpreter,
           -> { OpenAI::Beta::AssistantUpdateParams::ToolResources::CodeInterpreter }

  # @!attribute file_search
  #
  #   @return [OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::FileSearch, nil]
  optional :file_search, -> { OpenAI::Beta::AssistantUpdateParams::ToolResources::FileSearch }

  # @!method initialize(code_interpreter: nil, file_search: nil)
  #   A set of resources that are used by the assistant's tools. The resources are
  #   specific to the type of tool. For example, the `code_interpreter` tool requires
  #   a list of file IDs, while the `file_search` tool requires a list of vector store
  #   IDs.
  #
  #   @param code_interpreter [OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::CodeInterpreter]
  #   @param file_search [OpenAI::Models::Beta::AssistantUpdateParams::ToolResources::FileSearch]

  # @see OpenAI::Models::Beta::AssistantUpdateParams::ToolResources#code_interpreter
  class CodeInterpreter < OpenAI::Internal::Type::BaseModel
    # @!attribute file_ids
    #   Overrides the list of
    #   [file](https://platform.openai.com/docs/api-reference/files) IDs made available
    #   to the `code_interpreter` tool. There can be a maximum of 20 files associated
    #   with the tool.
    #
    #   @return [Array<String>, nil]
    optional :file_ids, OpenAI::Internal::Type::ArrayOf[String]

    # @!method initialize(file_ids: nil)
    #   @param file_ids [Array<String>] Overrides the list of [file](https://platform.openai.com/docs/api-reference/file
  end

  # @see OpenAI::Models::Beta::AssistantUpdateParams::ToolResources#file_search
  class FileSearch < OpenAI::Internal::Type::BaseModel
    # @!attribute vector_store_ids
    #   Overrides the
    #   [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
    #   attached to this assistant. There can be a maximum of 1 vector store attached to
    #   the assistant.
    #
    #   @return [Array<String>, nil]
    optional :vector_store_ids, OpenAI::Internal::Type::ArrayOf[String]

    # @!method initialize(vector_store_ids: nil)
    #   @param vector_store_ids [Array<String>] Overrides the [vector store](https://platform.openai.com/docs/api-reference/vect
  end
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Beta
      # Tool descriptor enabling the assistant's code interpreter.
      # (Reconstructed: coverage-report artifacts had been interleaved with the code.)
      class CodeInterpreterTool < OpenAI::Internal::Type::BaseModel
        # @!attribute type
        #   The type of tool being defined: `code_interpreter`
        #
        #   @return [Symbol, :code_interpreter]
        required :type, const: :code_interpreter

        # @!method initialize(type: :code_interpreter)
        #   @param type [Symbol, :code_interpreter] The type of tool being defined: `code_interpreter`
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Beta
      # Tool descriptor enabling the assistant's file search, with optional overrides.
      # (Reconstructed: coverage-report artifacts had been interleaved with the code.)
      class FileSearchTool < OpenAI::Internal::Type::BaseModel
        # @!attribute type
        #   The type of tool being defined: `file_search`
        #
        #   @return [Symbol, :file_search]
        required :type, const: :file_search

        # @!attribute file_search
        #   Overrides for the file search tool.
        #
        #   @return [OpenAI::Models::Beta::FileSearchTool::FileSearch, nil]
        optional :file_search, -> { OpenAI::Beta::FileSearchTool::FileSearch }

        # @!method initialize(file_search: nil, type: :file_search)
        #   @param file_search [OpenAI::Models::Beta::FileSearchTool::FileSearch] Overrides for the file search tool.
        #   @param type [Symbol, :file_search] The type of tool being defined: `file_search`

        # @see OpenAI::Models::Beta::FileSearchTool#file_search
        class FileSearch < OpenAI::Internal::Type::BaseModel
          # @!attribute max_num_results
          #   The maximum number of results the file search tool should output. The default is
          #   20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number should be between
          #   1 and 50 inclusive.
          #
          #   Note that the file search tool may output fewer than `max_num_results` results.
          #   See the
          #   [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
          #   for more information.
          #
          #   @return [Integer, nil]
          optional :max_num_results, Integer

          # @!attribute ranking_options
          #   The ranking options for the file search. If not specified, the file search tool
          #   will use the `auto` ranker and a score_threshold of 0.
          #
          #   See the
          #   [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
          #   for more information.
          #
          #   @return [OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions, nil]
          optional :ranking_options, -> { OpenAI::Beta::FileSearchTool::FileSearch::RankingOptions }

          # @!method initialize(max_num_results: nil, ranking_options: nil)
          #   Overrides for the file search tool.
          #
          #   @param max_num_results [Integer] The maximum number of results the file search tool should output. The default is
          #   @param ranking_options [OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions] The ranking options for the file search. If not specified, the file search tool

          # @see OpenAI::Models::Beta::FileSearchTool::FileSearch#ranking_options
          class RankingOptions < OpenAI::Internal::Type::BaseModel
            # @!attribute score_threshold
            #   The score threshold for the file search. All values must be a floating point
            #   number between 0 and 1.
            #
            #   @return [Float]
            required :score_threshold, Float

            # @!attribute ranker
            #   The ranker to use for the file search. If not specified will use the `auto`
            #   ranker.
            #
            #   @return [Symbol, OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions::Ranker, nil]
            optional :ranker, enum: -> { OpenAI::Beta::FileSearchTool::FileSearch::RankingOptions::Ranker }

            # @!method initialize(score_threshold:, ranker: nil)
            #   The ranking options for the file search. If not specified, the file search tool
            #   will use the `auto` ranker and a score_threshold of 0.
            #
            #   @param score_threshold [Float] The score threshold for the file search. All values must be a floating point num
            #   @param ranker [Symbol, OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions::Ranker] The ranker to use for the file search. If not specified will use the `auto` rank

            # The ranker to use for the file search. If not specified will use the `auto`
            # ranker.
            #
            # @see OpenAI::Models::Beta::FileSearchTool::FileSearch::RankingOptions#ranker
            module Ranker
              extend OpenAI::Internal::Type::Enum

              AUTO = :auto
              DEFAULT_2024_08_21 = :default_2024_08_21

              # @!method self.values
              #   @return [Array<Symbol>]
            end
          end
        end
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Beta
      # Tool descriptor exposing a user-defined function to the assistant.
      # (Reconstructed: coverage-report artifacts had been interleaved with the code.)
      class FunctionTool < OpenAI::Internal::Type::BaseModel
        # @!attribute function
        #
        #   @return [OpenAI::Models::FunctionDefinition]
        required :function, -> { OpenAI::FunctionDefinition }

        # @!attribute type
        #   The type of tool being defined: `function`
        #
        #   @return [Symbol, :function]
        required :type, const: :function

        # @!method initialize(function:, type: :function)
        #   @param function [OpenAI::Models::FunctionDefinition]
        #   @param type [Symbol, :function] The type of tool being defined: `function`
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Beta
      # Discriminated union over the message lifecycle streaming events.
      # The `event` field selects the concrete variant class.
      # (Reconstructed: coverage-report artifacts had been interleaved with the code.)
      #
      # Occurs when a
      # [message](https://platform.openai.com/docs/api-reference/messages/object) is
      # created.
      module MessageStreamEvent
        extend OpenAI::Internal::Type::Union

        discriminator :event

        # Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) is created.
        variant :"thread.message.created", -> { OpenAI::Beta::MessageStreamEvent::ThreadMessageCreated }

        # Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) moves to an `in_progress` state.
        variant :"thread.message.in_progress",
                -> { OpenAI::Beta::MessageStreamEvent::ThreadMessageInProgress }

        # Occurs when parts of a [Message](https://platform.openai.com/docs/api-reference/messages/object) are being streamed.
        variant :"thread.message.delta", -> { OpenAI::Beta::MessageStreamEvent::ThreadMessageDelta }

        # Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) is completed.
        variant :"thread.message.completed", -> { OpenAI::Beta::MessageStreamEvent::ThreadMessageCompleted }

        # Occurs when a [message](https://platform.openai.com/docs/api-reference/messages/object) ends before it is completed.
        variant :"thread.message.incomplete", -> { OpenAI::Beta::MessageStreamEvent::ThreadMessageIncomplete }

        class ThreadMessageCreated < OpenAI::Internal::Type::BaseModel
          # @!attribute data
          #   Represents a message within a
          #   [thread](https://platform.openai.com/docs/api-reference/threads).
          #
          #   @return [OpenAI::Models::Beta::Threads::Message]
          required :data, -> { OpenAI::Beta::Threads::Message }

          # @!attribute event
          #
          #   @return [Symbol, :"thread.message.created"]
          required :event, const: :"thread.message.created"

          # @!method initialize(data:, event: :"thread.message.created")
          #   Occurs when a
          #   [message](https://platform.openai.com/docs/api-reference/messages/object) is
          #   created.
          #
          #   @param data [OpenAI::Models::Beta::Threads::Message] Represents a message within a [thread](https://platform.openai.com/docs/api-refe
          #   @param event [Symbol, :"thread.message.created"]
        end

        class ThreadMessageInProgress < OpenAI::Internal::Type::BaseModel
          # @!attribute data
          #   Represents a message within a
          #   [thread](https://platform.openai.com/docs/api-reference/threads).
          #
          #   @return [OpenAI::Models::Beta::Threads::Message]
          required :data, -> { OpenAI::Beta::Threads::Message }

          # @!attribute event
          #
          #   @return [Symbol, :"thread.message.in_progress"]
          required :event, const: :"thread.message.in_progress"

          # @!method initialize(data:, event: :"thread.message.in_progress")
          #   Occurs when a
          #   [message](https://platform.openai.com/docs/api-reference/messages/object) moves
          #   to an `in_progress` state.
          #
          #   @param data [OpenAI::Models::Beta::Threads::Message] Represents a message within a [thread](https://platform.openai.com/docs/api-refe
          #   @param event [Symbol, :"thread.message.in_progress"]
        end

        class ThreadMessageDelta < OpenAI::Internal::Type::BaseModel
          # @!attribute data
          #   Represents a message delta i.e. any changed fields on a message during
          #   streaming.
          #
          #   @return [OpenAI::Models::Beta::Threads::MessageDeltaEvent]
          required :data, -> { OpenAI::Beta::Threads::MessageDeltaEvent }

          # @!attribute event
          #
          #   @return [Symbol, :"thread.message.delta"]
          required :event, const: :"thread.message.delta"

          # @!method initialize(data:, event: :"thread.message.delta")
          #   Occurs when parts of a
          #   [Message](https://platform.openai.com/docs/api-reference/messages/object) are
          #   being streamed.
          #
          #   @param data [OpenAI::Models::Beta::Threads::MessageDeltaEvent] Represents a message delta i.e. any changed fields on a message during streaming
          #   @param event [Symbol, :"thread.message.delta"]
        end

        class ThreadMessageCompleted < OpenAI::Internal::Type::BaseModel
          # @!attribute data
          #   Represents a message within a
          #   [thread](https://platform.openai.com/docs/api-reference/threads).
          #
          #   @return [OpenAI::Models::Beta::Threads::Message]
          required :data, -> { OpenAI::Beta::Threads::Message }

          # @!attribute event
          #
          #   @return [Symbol, :"thread.message.completed"]
          required :event, const: :"thread.message.completed"

          # @!method initialize(data:, event: :"thread.message.completed")
          #   Occurs when a
          #   [message](https://platform.openai.com/docs/api-reference/messages/object) is
          #   completed.
          #
          #   @param data [OpenAI::Models::Beta::Threads::Message] Represents a message within a [thread](https://platform.openai.com/docs/api-refe
          #   @param event [Symbol, :"thread.message.completed"]
        end

        class ThreadMessageIncomplete < OpenAI::Internal::Type::BaseModel
          # @!attribute data
          #   Represents a message within a
          #   [thread](https://platform.openai.com/docs/api-reference/threads).
          #
          #   @return [OpenAI::Models::Beta::Threads::Message]
          required :data, -> { OpenAI::Beta::Threads::Message }

          # @!attribute event
          #
          #   @return [Symbol, :"thread.message.incomplete"]
          required :event, const: :"thread.message.incomplete"

          # @!method initialize(data:, event: :"thread.message.incomplete")
          #   Occurs when a
          #   [message](https://platform.openai.com/docs/api-reference/messages/object) ends
          #   before it is completed.
          #
          #   @param data [OpenAI::Models::Beta::Threads::Message] Represents a message within a [thread](https://platform.openai.com/docs/api-refe
          #   @param event [Symbol, :"thread.message.incomplete"]
        end

        # @!method self.variants
        #   @return [Array(OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageCreated, OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageInProgress, OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageDelta, OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageCompleted, OpenAI::Models::Beta::MessageStreamEvent::ThreadMessageIncomplete)]
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Beta
      # Discriminated union over the run-step lifecycle streaming events.
      # The `event` field selects the concrete variant class.
      # (Reconstructed: coverage-report artifacts had been interleaved with the code.)
      #
      # Occurs when a
      # [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)
      # is created.
      module RunStepStreamEvent
        extend OpenAI::Internal::Type::Union

        discriminator :event

        # Occurs when a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) is created.
        variant :"thread.run.step.created", -> { OpenAI::Beta::RunStepStreamEvent::ThreadRunStepCreated }

        # Occurs when a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) moves to an `in_progress` state.
        variant :"thread.run.step.in_progress",
                -> { OpenAI::Beta::RunStepStreamEvent::ThreadRunStepInProgress }

        # Occurs when parts of a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) are being streamed.
        variant :"thread.run.step.delta", -> { OpenAI::Beta::RunStepStreamEvent::ThreadRunStepDelta }

        # Occurs when a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) is completed.
        variant :"thread.run.step.completed", -> { OpenAI::Beta::RunStepStreamEvent::ThreadRunStepCompleted }

        # Occurs when a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) fails.
        variant :"thread.run.step.failed", -> { OpenAI::Beta::RunStepStreamEvent::ThreadRunStepFailed }

        # Occurs when a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) is cancelled.
        variant :"thread.run.step.cancelled", -> { OpenAI::Beta::RunStepStreamEvent::ThreadRunStepCancelled }

        # Occurs when a [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object) expires.
        variant :"thread.run.step.expired", -> { OpenAI::Beta::RunStepStreamEvent::ThreadRunStepExpired }

        class ThreadRunStepCreated < OpenAI::Internal::Type::BaseModel
          # @!attribute data
          #   Represents a step in execution of a run.
          #
          #   @return [OpenAI::Models::Beta::Threads::Runs::RunStep]
          required :data, -> { OpenAI::Beta::Threads::Runs::RunStep }

          # @!attribute event
          #
          #   @return [Symbol, :"thread.run.step.created"]
          required :event, const: :"thread.run.step.created"

          # @!method initialize(data:, event: :"thread.run.step.created")
          #   Occurs when a
          #   [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)
          #   is created.
          #
          #   @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] Represents a step in execution of a run.
          #   @param event [Symbol, :"thread.run.step.created"]
        end

        class ThreadRunStepInProgress < OpenAI::Internal::Type::BaseModel
          # @!attribute data
          #   Represents a step in execution of a run.
          #
          #   @return [OpenAI::Models::Beta::Threads::Runs::RunStep]
          required :data, -> { OpenAI::Beta::Threads::Runs::RunStep }

          # @!attribute event
          #
          #   @return [Symbol, :"thread.run.step.in_progress"]
          required :event, const: :"thread.run.step.in_progress"

          # @!method initialize(data:, event: :"thread.run.step.in_progress")
          #   Occurs when a
          #   [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)
          #   moves to an `in_progress` state.
          #
          #   @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] Represents a step in execution of a run.
          #   @param event [Symbol, :"thread.run.step.in_progress"]
        end

        class ThreadRunStepDelta < OpenAI::Internal::Type::BaseModel
          # @!attribute data
          #   Represents a run step delta i.e. any changed fields on a run step during
          #   streaming.
          #
          #   @return [OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent]
          required :data, -> { OpenAI::Beta::Threads::Runs::RunStepDeltaEvent }

          # @!attribute event
          #
          #   @return [Symbol, :"thread.run.step.delta"]
          required :event, const: :"thread.run.step.delta"

          # @!method initialize(data:, event: :"thread.run.step.delta")
          #   Occurs when parts of a
          #   [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)
          #   are being streamed.
          #
          #   @param data [OpenAI::Models::Beta::Threads::Runs::RunStepDeltaEvent] Represents a run step delta i.e. any changed fields on a run step during streami
          #   @param event [Symbol, :"thread.run.step.delta"]
        end

        class ThreadRunStepCompleted < OpenAI::Internal::Type::BaseModel
          # @!attribute data
          #   Represents a step in execution of a run.
          #
          #   @return [OpenAI::Models::Beta::Threads::Runs::RunStep]
          required :data, -> { OpenAI::Beta::Threads::Runs::RunStep }

          # @!attribute event
          #
          #   @return [Symbol, :"thread.run.step.completed"]
          required :event, const: :"thread.run.step.completed"

          # @!method initialize(data:, event: :"thread.run.step.completed")
          #   Occurs when a
          #   [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)
          #   is completed.
          #
          #   @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] Represents a step in execution of a run.
          #   @param event [Symbol, :"thread.run.step.completed"]
        end

        class ThreadRunStepFailed < OpenAI::Internal::Type::BaseModel
          # @!attribute data
          #   Represents a step in execution of a run.
          #
          #   @return [OpenAI::Models::Beta::Threads::Runs::RunStep]
          required :data, -> { OpenAI::Beta::Threads::Runs::RunStep }

          # @!attribute event
          #
          #   @return [Symbol, :"thread.run.step.failed"]
          required :event, const: :"thread.run.step.failed"

          # @!method initialize(data:, event: :"thread.run.step.failed")
          #   Occurs when a
          #   [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)
          #   fails.
          #
          #   @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] Represents a step in execution of a run.
          #   @param event [Symbol, :"thread.run.step.failed"]
        end

        class ThreadRunStepCancelled < OpenAI::Internal::Type::BaseModel
          # @!attribute data
          #   Represents a step in execution of a run.
          #
          #   @return [OpenAI::Models::Beta::Threads::Runs::RunStep]
          required :data, -> { OpenAI::Beta::Threads::Runs::RunStep }

          # @!attribute event
          #
          #   @return [Symbol, :"thread.run.step.cancelled"]
          required :event, const: :"thread.run.step.cancelled"

          # @!method initialize(data:, event: :"thread.run.step.cancelled")
          #   Occurs when a
          #   [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)
          #   is cancelled.
          #
          #   @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] Represents a step in execution of a run.
          #   @param event [Symbol, :"thread.run.step.cancelled"]
        end

        class ThreadRunStepExpired < OpenAI::Internal::Type::BaseModel
          # @!attribute data
          #   Represents a step in execution of a run.
          #
          #   @return [OpenAI::Models::Beta::Threads::Runs::RunStep]
          required :data, -> { OpenAI::Beta::Threads::Runs::RunStep }

          # @!attribute event
          #
          #   @return [Symbol, :"thread.run.step.expired"]
          required :event, const: :"thread.run.step.expired"

          # @!method initialize(data:, event: :"thread.run.step.expired")
          #   Occurs when a
          #   [run step](https://platform.openai.com/docs/api-reference/run-steps/step-object)
          #   expires.
          #
          #   @param data [OpenAI::Models::Beta::Threads::Runs::RunStep] Represents a step in execution of a run.
          #   @param event [Symbol, :"thread.run.step.expired"]
        end

        # @!method self.variants
        #   @return [Array(OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepCreated, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepInProgress, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepDelta, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepCompleted, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepFailed, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepCancelled, OpenAI::Models::Beta::RunStepStreamEvent::ThreadRunStepExpired)]
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
# Occurs when a new
-
# [run](https://platform.openai.com/docs/api-reference/runs/object) is created.
-
1
module RunStreamEvent
-
1
# Union setup for the run lifecycle streaming events: the `event` field picks
# the concrete variant class registered below.
# (Reconstructed: coverage-report artifacts had been interleaved with the code.)
extend OpenAI::Internal::Type::Union

discriminator :event

# Occurs when a new [run](https://platform.openai.com/docs/api-reference/runs/object) is created.
variant :"thread.run.created", -> { OpenAI::Beta::RunStreamEvent::ThreadRunCreated }

# Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to a `queued` status.
variant :"thread.run.queued", -> { OpenAI::Beta::RunStreamEvent::ThreadRunQueued }

# Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to an `in_progress` status.
variant :"thread.run.in_progress", -> { OpenAI::Beta::RunStreamEvent::ThreadRunInProgress }

# Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to a `requires_action` status.
variant :"thread.run.requires_action", -> { OpenAI::Beta::RunStreamEvent::ThreadRunRequiresAction }

# Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) is completed.
variant :"thread.run.completed", -> { OpenAI::Beta::RunStreamEvent::ThreadRunCompleted }

# Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) ends with status `incomplete`.
variant :"thread.run.incomplete", -> { OpenAI::Beta::RunStreamEvent::ThreadRunIncomplete }

# Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) fails.
variant :"thread.run.failed", -> { OpenAI::Beta::RunStreamEvent::ThreadRunFailed }

# Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) moves to a `cancelling` status.
variant :"thread.run.cancelling", -> { OpenAI::Beta::RunStreamEvent::ThreadRunCancelling }

# Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) is cancelled.
variant :"thread.run.cancelled", -> { OpenAI::Beta::RunStreamEvent::ThreadRunCancelled }

# Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object) expires.
variant :"thread.run.expired", -> { OpenAI::Beta::RunStreamEvent::ThreadRunExpired }
-
-
1
class ThreadRunCreated < OpenAI::Internal::Type::BaseModel
  # (Reconstructed: coverage-report artifacts had been interleaved with the code.)

  # @!attribute data
  #   Represents an execution run on a
  #   [thread](https://platform.openai.com/docs/api-reference/threads).
  #
  #   @return [OpenAI::Models::Beta::Threads::Run]
  required :data, -> { OpenAI::Beta::Threads::Run }

  # @!attribute event
  #
  #   @return [Symbol, :"thread.run.created"]
  required :event, const: :"thread.run.created"

  # @!method initialize(data:, event: :"thread.run.created")
  #   Occurs when a new
  #   [run](https://platform.openai.com/docs/api-reference/runs/object) is created.
  #
  #   @param data [OpenAI::Models::Beta::Threads::Run] Represents an execution run on a [thread](https://platform.openai.com/docs/api-r
  #   @param event [Symbol, :"thread.run.created"]
end
-
-
1
class ThreadRunQueued < OpenAI::Internal::Type::BaseModel
  # (Reconstructed: coverage-report artifacts had been interleaved with the code.)

  # @!attribute data
  #   Represents an execution run on a
  #   [thread](https://platform.openai.com/docs/api-reference/threads).
  #
  #   @return [OpenAI::Models::Beta::Threads::Run]
  required :data, -> { OpenAI::Beta::Threads::Run }

  # @!attribute event
  #
  #   @return [Symbol, :"thread.run.queued"]
  required :event, const: :"thread.run.queued"

  # @!method initialize(data:, event: :"thread.run.queued")
  #   Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
  #   moves to a `queued` status.
  #
  #   @param data [OpenAI::Models::Beta::Threads::Run] Represents an execution run on a [thread](https://platform.openai.com/docs/api-r
  #   @param event [Symbol, :"thread.run.queued"]
end
-
-
1
class ThreadRunInProgress < OpenAI::Internal::Type::BaseModel
  # (Reconstructed: coverage-report artifacts had been interleaved with the code.)

  # @!attribute data
  #   Represents an execution run on a
  #   [thread](https://platform.openai.com/docs/api-reference/threads).
  #
  #   @return [OpenAI::Models::Beta::Threads::Run]
  required :data, -> { OpenAI::Beta::Threads::Run }

  # @!attribute event
  #
  #   @return [Symbol, :"thread.run.in_progress"]
  required :event, const: :"thread.run.in_progress"

  # @!method initialize(data:, event: :"thread.run.in_progress")
  #   Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
  #   moves to an `in_progress` status.
  #
  #   @param data [OpenAI::Models::Beta::Threads::Run] Represents an execution run on a [thread](https://platform.openai.com/docs/api-r
  #   @param event [Symbol, :"thread.run.in_progress"]
end
-
-
1
class ThreadRunRequiresAction < OpenAI::Internal::Type::BaseModel
  # (Reconstructed: coverage-report artifacts had been interleaved with the code.)

  # @!attribute data
  #   Represents an execution run on a
  #   [thread](https://platform.openai.com/docs/api-reference/threads).
  #
  #   @return [OpenAI::Models::Beta::Threads::Run]
  required :data, -> { OpenAI::Beta::Threads::Run }

  # @!attribute event
  #
  #   @return [Symbol, :"thread.run.requires_action"]
  required :event, const: :"thread.run.requires_action"

  # @!method initialize(data:, event: :"thread.run.requires_action")
  #   Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
  #   moves to a `requires_action` status.
  #
  #   @param data [OpenAI::Models::Beta::Threads::Run] Represents an execution run on a [thread](https://platform.openai.com/docs/api-r
  #   @param event [Symbol, :"thread.run.requires_action"]
end
-
-
1
class ThreadRunCompleted < OpenAI::Internal::Type::BaseModel
  # (Reconstructed: coverage-report artifacts had been interleaved with the code.)

  # @!attribute data
  #   Represents an execution run on a
  #   [thread](https://platform.openai.com/docs/api-reference/threads).
  #
  #   @return [OpenAI::Models::Beta::Threads::Run]
  required :data, -> { OpenAI::Beta::Threads::Run }

  # @!attribute event
  #
  #   @return [Symbol, :"thread.run.completed"]
  required :event, const: :"thread.run.completed"

  # @!method initialize(data:, event: :"thread.run.completed")
  #   Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
  #   is completed.
  #
  #   @param data [OpenAI::Models::Beta::Threads::Run] Represents an execution run on a [thread](https://platform.openai.com/docs/api-r
  #   @param event [Symbol, :"thread.run.completed"]
end
-
-
1
class ThreadRunIncomplete < OpenAI::Internal::Type::BaseModel
  # (Reconstructed: coverage-report artifacts had been interleaved with the code.)

  # @!attribute data
  #   Represents an execution run on a
  #   [thread](https://platform.openai.com/docs/api-reference/threads).
  #
  #   @return [OpenAI::Models::Beta::Threads::Run]
  required :data, -> { OpenAI::Beta::Threads::Run }

  # @!attribute event
  #
  #   @return [Symbol, :"thread.run.incomplete"]
  required :event, const: :"thread.run.incomplete"

  # @!method initialize(data:, event: :"thread.run.incomplete")
  #   Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
  #   ends with status `incomplete`.
  #
  #   @param data [OpenAI::Models::Beta::Threads::Run] Represents an execution run on a [thread](https://platform.openai.com/docs/api-r
  #   @param event [Symbol, :"thread.run.incomplete"]
end
-
-
1
class ThreadRunFailed < OpenAI::Internal::Type::BaseModel
-
# @!attribute data
-
# Represents an execution run on a
-
# [thread](https://platform.openai.com/docs/api-reference/threads).
-
#
-
# @return [OpenAI::Models::Beta::Threads::Run]
-
1
required :data, -> { OpenAI::Beta::Threads::Run }
-
-
# @!attribute event
-
#
-
# @return [Symbol, :"thread.run.failed"]
-
1
required :event, const: :"thread.run.failed"
-
-
# @!method initialize(data:, event: :"thread.run.failed")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::RunStreamEvent::ThreadRunFailed} for more details.
-
#
-
# Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
-
# fails.
-
#
-
# @param data [OpenAI::Models::Beta::Threads::Run] Represents an execution run on a [thread](https://platform.openai.com/docs/api-r
-
#
-
# @param event [Symbol, :"thread.run.failed"]
-
end
-
-
1
class ThreadRunCancelling < OpenAI::Internal::Type::BaseModel
-
# @!attribute data
-
# Represents an execution run on a
-
# [thread](https://platform.openai.com/docs/api-reference/threads).
-
#
-
# @return [OpenAI::Models::Beta::Threads::Run]
-
1
required :data, -> { OpenAI::Beta::Threads::Run }
-
-
# @!attribute event
-
#
-
# @return [Symbol, :"thread.run.cancelling"]
-
1
required :event, const: :"thread.run.cancelling"
-
-
# @!method initialize(data:, event: :"thread.run.cancelling")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::RunStreamEvent::ThreadRunCancelling} for more details.
-
#
-
# Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
-
# moves to a `cancelling` status.
-
#
-
# @param data [OpenAI::Models::Beta::Threads::Run] Represents an execution run on a [thread](https://platform.openai.com/docs/api-r
-
#
-
# @param event [Symbol, :"thread.run.cancelling"]
-
end
-
-
1
class ThreadRunCancelled < OpenAI::Internal::Type::BaseModel
-
# @!attribute data
-
# Represents an execution run on a
-
# [thread](https://platform.openai.com/docs/api-reference/threads).
-
#
-
# @return [OpenAI::Models::Beta::Threads::Run]
-
1
required :data, -> { OpenAI::Beta::Threads::Run }
-
-
# @!attribute event
-
#
-
# @return [Symbol, :"thread.run.cancelled"]
-
1
required :event, const: :"thread.run.cancelled"
-
-
# @!method initialize(data:, event: :"thread.run.cancelled")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::RunStreamEvent::ThreadRunCancelled} for more details.
-
#
-
# Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
-
# is cancelled.
-
#
-
# @param data [OpenAI::Models::Beta::Threads::Run] Represents an execution run on a [thread](https://platform.openai.com/docs/api-r
-
#
-
# @param event [Symbol, :"thread.run.cancelled"]
-
end
-
-
1
class ThreadRunExpired < OpenAI::Internal::Type::BaseModel
-
# @!attribute data
-
# Represents an execution run on a
-
# [thread](https://platform.openai.com/docs/api-reference/threads).
-
#
-
# @return [OpenAI::Models::Beta::Threads::Run]
-
1
required :data, -> { OpenAI::Beta::Threads::Run }
-
-
# @!attribute event
-
#
-
# @return [Symbol, :"thread.run.expired"]
-
1
required :event, const: :"thread.run.expired"
-
-
# @!method initialize(data:, event: :"thread.run.expired")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::RunStreamEvent::ThreadRunExpired} for more details.
-
#
-
# Occurs when a [run](https://platform.openai.com/docs/api-reference/runs/object)
-
# expires.
-
#
-
# @param data [OpenAI::Models::Beta::Threads::Run] Represents an execution run on a [thread](https://platform.openai.com/docs/api-r
-
#
-
# @param event [Symbol, :"thread.run.expired"]
-
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Beta::RunStreamEvent::ThreadRunCreated, OpenAI::Models::Beta::RunStreamEvent::ThreadRunQueued, OpenAI::Models::Beta::RunStreamEvent::ThreadRunInProgress, OpenAI::Models::Beta::RunStreamEvent::ThreadRunRequiresAction, OpenAI::Models::Beta::RunStreamEvent::ThreadRunCompleted, OpenAI::Models::Beta::RunStreamEvent::ThreadRunIncomplete, OpenAI::Models::Beta::RunStreamEvent::ThreadRunFailed, OpenAI::Models::Beta::RunStreamEvent::ThreadRunCancelling, OpenAI::Models::Beta::RunStreamEvent::ThreadRunCancelled, OpenAI::Models::Beta::RunStreamEvent::ThreadRunExpired)]
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
# @see OpenAI::Resources::Beta::Threads#create
-
1
class Thread < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The identifier, which can be referenced in API endpoints.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute created_at
-
# The Unix timestamp (in seconds) for when the thread was created.
-
#
-
# @return [Integer]
-
1
required :created_at, Integer
-
-
# @!attribute metadata
-
# Set of 16 key-value pairs that can be attached to an object. This can be useful
-
# for storing additional information about the object in a structured format, and
-
# querying for objects via API or the dashboard.
-
#
-
# Keys are strings with a maximum length of 64 characters. Values are strings with
-
# a maximum length of 512 characters.
-
#
-
# @return [Hash{Symbol=>String}, nil]
-
1
required :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
-
-
# @!attribute object
-
# The object type, which is always `thread`.
-
#
-
# @return [Symbol, :thread]
-
1
required :object, const: :thread
-
-
# @!attribute tool_resources
-
# A set of resources that are made available to the assistant's tools in this
-
# thread. The resources are specific to the type of tool. For example, the
-
# `code_interpreter` tool requires a list of file IDs, while the `file_search`
-
# tool requires a list of vector store IDs.
-
#
-
# @return [OpenAI::Models::Beta::Thread::ToolResources, nil]
-
1
required :tool_resources, -> { OpenAI::Beta::Thread::ToolResources }, nil?: true
-
-
# @!method initialize(id:, created_at:, metadata:, tool_resources:, object: :thread)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::Thread} for more details.
-
#
-
# Represents a thread that contains
-
# [messages](https://platform.openai.com/docs/api-reference/messages).
-
#
-
# @param id [String] The identifier, which can be referenced in API endpoints.
-
#
-
# @param created_at [Integer] The Unix timestamp (in seconds) for when the thread was created.
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
#
-
# @param tool_resources [OpenAI::Models::Beta::Thread::ToolResources, nil] A set of resources that are made available to the assistant's tools in this thre
-
#
-
# @param object [Symbol, :thread] The object type, which is always `thread`.
-
-
# @see OpenAI::Models::Beta::Thread#tool_resources
-
1
class ToolResources < OpenAI::Internal::Type::BaseModel
-
# @!attribute code_interpreter
-
#
-
# @return [OpenAI::Models::Beta::Thread::ToolResources::CodeInterpreter, nil]
-
1
optional :code_interpreter, -> { OpenAI::Beta::Thread::ToolResources::CodeInterpreter }
-
-
# @!attribute file_search
-
#
-
# @return [OpenAI::Models::Beta::Thread::ToolResources::FileSearch, nil]
-
1
optional :file_search, -> { OpenAI::Beta::Thread::ToolResources::FileSearch }
-
-
# @!method initialize(code_interpreter: nil, file_search: nil)
-
# A set of resources that are made available to the assistant's tools in this
-
# thread. The resources are specific to the type of tool. For example, the
-
# `code_interpreter` tool requires a list of file IDs, while the `file_search`
-
# tool requires a list of vector store IDs.
-
#
-
# @param code_interpreter [OpenAI::Models::Beta::Thread::ToolResources::CodeInterpreter]
-
# @param file_search [OpenAI::Models::Beta::Thread::ToolResources::FileSearch]
-
-
# @see OpenAI::Models::Beta::Thread::ToolResources#code_interpreter
-
1
class CodeInterpreter < OpenAI::Internal::Type::BaseModel
-
# @!attribute file_ids
-
# A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
-
# available to the `code_interpreter` tool. There can be a maximum of 20 files
-
# associated with the tool.
-
#
-
# @return [Array<String>, nil]
-
1
optional :file_ids, OpenAI::Internal::Type::ArrayOf[String]
-
-
# @!method initialize(file_ids: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::Thread::ToolResources::CodeInterpreter} for more details.
-
#
-
# @param file_ids [Array<String>] A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
-
end
-
-
# @see OpenAI::Models::Beta::Thread::ToolResources#file_search
-
1
class FileSearch < OpenAI::Internal::Type::BaseModel
-
# @!attribute vector_store_ids
-
# The
-
# [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
-
# attached to this thread. There can be a maximum of 1 vector store attached to
-
# the thread.
-
#
-
# @return [Array<String>, nil]
-
1
optional :vector_store_ids, OpenAI::Internal::Type::ArrayOf[String]
-
-
# @!method initialize(vector_store_ids: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::Thread::ToolResources::FileSearch} for more details.
-
#
-
# @param vector_store_ids [Array<String>] The [vector store](https://platform.openai.com/docs/api-reference/vector-stores/
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
# @see OpenAI::Resources::Beta::Threads#create_and_run
-
#
-
# @see OpenAI::Resources::Beta::Threads#stream_raw
-
1
class ThreadCreateAndRunParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!attribute assistant_id
-
# The ID of the
-
# [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
-
# execute this run.
-
#
-
# @return [String]
-
1
required :assistant_id, String
-
-
# @!attribute instructions
-
# Override the default system message of the assistant. This is useful for
-
# modifying the behavior on a per-run basis.
-
#
-
# @return [String, nil]
-
1
optional :instructions, String, nil?: true
-
-
# @!attribute max_completion_tokens
-
# The maximum number of completion tokens that may be used over the course of the
-
# run. The run will make a best effort to use only the number of completion tokens
-
# specified, across multiple turns of the run. If the run exceeds the number of
-
# completion tokens specified, the run will end with status `incomplete`. See
-
# `incomplete_details` for more info.
-
#
-
# @return [Integer, nil]
-
1
optional :max_completion_tokens, Integer, nil?: true
-
-
# @!attribute max_prompt_tokens
-
# The maximum number of prompt tokens that may be used over the course of the run.
-
# The run will make a best effort to use only the number of prompt tokens
-
# specified, across multiple turns of the run. If the run exceeds the number of
-
# prompt tokens specified, the run will end with status `incomplete`. See
-
# `incomplete_details` for more info.
-
#
-
# @return [Integer, nil]
-
1
optional :max_prompt_tokens, Integer, nil?: true
-
-
# @!attribute metadata
-
# Set of 16 key-value pairs that can be attached to an object. This can be useful
-
# for storing additional information about the object in a structured format, and
-
# querying for objects via API or the dashboard.
-
#
-
# Keys are strings with a maximum length of 64 characters. Values are strings with
-
# a maximum length of 512 characters.
-
#
-
# @return [Hash{Symbol=>String}, nil]
-
1
optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
-
-
# @!attribute model
-
# The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
-
# be used to execute this run. If a value is provided here, it will override the
-
# model associated with the assistant. If not, the model associated with the
-
# assistant will be used.
-
#
-
# @return [String, Symbol, OpenAI::Models::ChatModel, nil]
-
1
optional :model, union: -> { OpenAI::Beta::ThreadCreateAndRunParams::Model }, nil?: true
-
-
# @!attribute parallel_tool_calls
-
# Whether to enable
-
# [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
-
# during tool use.
-
#
-
# @return [Boolean, nil]
-
1
optional :parallel_tool_calls, OpenAI::Internal::Type::Boolean
-
-
# @!attribute response_format
-
# Specifies the format that the model must output. Compatible with
-
# [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
-
# [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
-
# and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
-
#
-
# Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
-
# Outputs which ensures the model will match your supplied JSON schema. Learn more
-
# in the
-
# [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
-
#
-
# Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
-
# message the model generates is valid JSON.
-
#
-
# **Important:** when using JSON mode, you **must** also instruct the model to
-
# produce JSON yourself via a system or user message. Without this, the model may
-
# generate an unending stream of whitespace until the generation reaches the token
-
# limit, resulting in a long-running and seemingly "stuck" request. Also note that
-
# the message content may be partially cut off if `finish_reason="length"`, which
-
# indicates the generation exceeded `max_tokens` or the conversation exceeded the
-
# max context length.
-
#
-
# @return [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil]
-
1
optional :response_format, union: -> { OpenAI::Beta::AssistantResponseFormatOption }, nil?: true
-
-
# @!attribute temperature
-
# What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
-
# make the output more random, while lower values like 0.2 will make it more
-
# focused and deterministic.
-
#
-
# @return [Float, nil]
-
1
optional :temperature, Float, nil?: true
-
-
# @!attribute thread
-
# Options to create a new thread. If no thread is provided when running a request,
-
# an empty thread will be created.
-
#
-
# @return [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread, nil]
-
1
optional :thread, -> { OpenAI::Beta::ThreadCreateAndRunParams::Thread }
-
-
# @!attribute tool_choice
-
# Controls which (if any) tool is called by the model. `none` means the model will
-
# not call any tools and instead generates a message. `auto` is the default value
-
# and means the model can pick between generating a message or calling one or more
-
# tools. `required` means the model must call one or more tools before responding
-
# to the user. Specifying a particular tool like `{"type": "file_search"}` or
-
# `{"type": "function", "function": {"name": "my_function"}}` forces the model to
-
# call that tool.
-
#
-
# @return [Symbol, OpenAI::Models::Beta::AssistantToolChoiceOption::Auto, OpenAI::Models::Beta::AssistantToolChoice, nil]
-
1
optional :tool_choice, union: -> { OpenAI::Beta::AssistantToolChoiceOption }, nil?: true
-
-
# @!attribute tool_resources
-
# A set of resources that are used by the assistant's tools. The resources are
-
# specific to the type of tool. For example, the `code_interpreter` tool requires
-
# a list of file IDs, while the `file_search` tool requires a list of vector store
-
# IDs.
-
#
-
# @return [OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources, nil]
-
1
optional :tool_resources, -> { OpenAI::Beta::ThreadCreateAndRunParams::ToolResources }, nil?: true
-
-
# @!attribute tools
-
# Override the tools the assistant can use for this run. This is useful for
-
# modifying the behavior on a per-run basis.
-
#
-
# @return [Array<OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::FileSearchTool, OpenAI::Models::Beta::FunctionTool>, nil]
-
1
optional :tools,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::AssistantTool]
-
},
-
nil?: true
-
-
# @!attribute top_p
-
# An alternative to sampling with temperature, called nucleus sampling, where the
-
# model considers the results of the tokens with top_p probability mass. So 0.1
-
# means only the tokens comprising the top 10% probability mass are considered.
-
#
-
# We generally recommend altering this or temperature but not both.
-
#
-
# @return [Float, nil]
-
1
optional :top_p, Float, nil?: true
-
-
# @!attribute truncation_strategy
-
# Controls for how a thread will be truncated prior to the run. Use this to
-
# control the intial context window of the run.
-
#
-
# @return [OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy, nil]
-
1
optional :truncation_strategy,
-
-> { OpenAI::Beta::ThreadCreateAndRunParams::TruncationStrategy },
-
nil?: true
-
-
# @!method initialize(assistant_id:, instructions: nil, max_completion_tokens: nil, max_prompt_tokens: nil, metadata: nil, model: nil, parallel_tool_calls: nil, response_format: nil, temperature: nil, thread: nil, tool_choice: nil, tool_resources: nil, tools: nil, top_p: nil, truncation_strategy: nil, request_options: {})
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::ThreadCreateAndRunParams} for more details.
-
#
-
# @param assistant_id [String] The ID of the [assistant](https://platform.openai.com/docs/api-reference/assista
-
#
-
# @param instructions [String, nil] Override the default system message of the assistant. This is useful for modifyi
-
#
-
# @param max_completion_tokens [Integer, nil] The maximum number of completion tokens that may be used over the course of the
-
#
-
# @param max_prompt_tokens [Integer, nil] The maximum number of prompt tokens that may be used over the course of the run.
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
#
-
# @param model [String, Symbol, OpenAI::Models::ChatModel, nil] The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
-
#
-
# @param parallel_tool_calls [Boolean] Whether to enable [parallel function calling](https://platform.openai.com/docs/g
-
#
-
# @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] Specifies the format that the model must output. Compatible with [GPT-4o](https:
-
#
-
# @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
-
#
-
# @param thread [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread] Options to create a new thread. If no thread is provided when running a
-
#
-
# @param tool_choice [Symbol, OpenAI::Models::Beta::AssistantToolChoiceOption::Auto, OpenAI::Models::Beta::AssistantToolChoice, nil] Controls which (if any) tool is called by the model.
-
#
-
# @param tool_resources [OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources, nil] A set of resources that are used by the assistant's tools. The resources are spe
-
#
-
# @param tools [Array<OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::FileSearchTool, OpenAI::Models::Beta::FunctionTool>, nil] Override the tools the assistant can use for this run. This is useful for modify
-
#
-
# @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling, where the
-
#
-
# @param truncation_strategy [OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy, nil] Controls for how a thread will be truncated prior to the run. Use this to contro
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
-
# The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
-
# be used to execute this run. If a value is provided here, it will override the
-
# model associated with the assistant. If not, the model associated with the
-
# assistant will be used.
-
1
module Model
-
1
extend OpenAI::Internal::Type::Union
-
-
1
variant String
-
-
# The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used.
-
1
variant enum: -> { OpenAI::ChatModel }
-
-
# @!method self.variants
-
# @return [Array(String, Symbol, OpenAI::Models::ChatModel)]
-
end
-
-
1
class Thread < OpenAI::Internal::Type::BaseModel
-
# @!attribute messages
-
# A list of [messages](https://platform.openai.com/docs/api-reference/messages) to
-
# start the thread with.
-
#
-
# @return [Array<OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message>, nil]
-
1
optional :messages,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message]
-
}
-
-
# @!attribute metadata
-
# Set of 16 key-value pairs that can be attached to an object. This can be useful
-
# for storing additional information about the object in a structured format, and
-
# querying for objects via API or the dashboard.
-
#
-
# Keys are strings with a maximum length of 64 characters. Values are strings with
-
# a maximum length of 512 characters.
-
#
-
# @return [Hash{Symbol=>String}, nil]
-
1
optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
-
-
# @!attribute tool_resources
-
# A set of resources that are made available to the assistant's tools in this
-
# thread. The resources are specific to the type of tool. For example, the
-
# `code_interpreter` tool requires a list of file IDs, while the `file_search`
-
# tool requires a list of vector store IDs.
-
#
-
# @return [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources, nil]
-
1
optional :tool_resources,
-
-> {
-
OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources
-
},
-
nil?: true
-
-
# @!method initialize(messages: nil, metadata: nil, tool_resources: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread} for more details.
-
#
-
# Options to create a new thread. If no thread is provided when running a request,
-
# an empty thread will be created.
-
#
-
# @param messages [Array<OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message>] A list of [messages](https://platform.openai.com/docs/api-reference/messages) to
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
#
-
# @param tool_resources [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources, nil] A set of resources that are made available to the assistant's tools in this thre
-
-
1
class Message < OpenAI::Internal::Type::BaseModel
-
# @!attribute content
-
# The text contents of the message.
-
#
-
# @return [String, Array<OpenAI::Models::Beta::Threads::ImageFileContentBlock, OpenAI::Models::Beta::Threads::ImageURLContentBlock, OpenAI::Models::Beta::Threads::TextContentBlockParam>]
-
1
required :content, union: -> { OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message::Content }
-
-
# @!attribute role
-
# The role of the entity that is creating the message. Allowed values include:
-
#
-
# - `user`: Indicates the message is sent by an actual user and should be used in
-
# most cases to represent user-generated messages.
-
# - `assistant`: Indicates the message is generated by the assistant. Use this
-
# value to insert messages from the assistant into the conversation.
-
#
-
# @return [Symbol, OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Role]
-
1
required :role, enum: -> { OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message::Role }
-
-
# @!attribute attachments
-
# A list of files attached to the message, and the tools they should be added to.
-
#
-
# @return [Array<OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment>, nil]
-
1
optional :attachments,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment]
-
},
-
nil?: true
-
-
# @!attribute metadata
-
# Set of 16 key-value pairs that can be attached to an object. This can be useful
-
# for storing additional information about the object in a structured format, and
-
# querying for objects via API or the dashboard.
-
#
-
# Keys are strings with a maximum length of 64 characters. Values are strings with
-
# a maximum length of 512 characters.
-
#
-
# @return [Hash{Symbol=>String}, nil]
-
1
optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
-
-
# @!method initialize(content:, role:, attachments: nil, metadata: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message} for more
-
# details.
-
#
-
# @param content [String, Array<OpenAI::Models::Beta::Threads::ImageFileContentBlock, OpenAI::Models::Beta::Threads::ImageURLContentBlock, OpenAI::Models::Beta::Threads::TextContentBlockParam>] The text contents of the message.
-
#
-
# @param role [Symbol, OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Role] The role of the entity that is creating the message. Allowed values include:
-
#
-
# @param attachments [Array<OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment>, nil] A list of files attached to the message, and the tools they should be added to.
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
-
# The text contents of the message.
-
#
-
# @see OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message#content
-
1
module Content
-
1
extend OpenAI::Internal::Type::Union
-
-
# The text contents of the message.
-
1
variant String
-
-
# An array of content parts with a defined type, each can be of type `text` or images can be passed with `image_url` or `image_file`. Image types are only supported on [Vision-compatible models](https://platform.openai.com/docs/models).
-
1
variant -> { OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Content::MessageContentPartParamArray }
-
-
# @!method self.variants
-
# @return [Array(String, Array<OpenAI::Models::Beta::Threads::ImageFileContentBlock, OpenAI::Models::Beta::Threads::ImageURLContentBlock, OpenAI::Models::Beta::Threads::TextContentBlockParam>)]
-
-
# @type [OpenAI::Internal::Type::Converter]
-
MessageContentPartParamArray =
-
1
OpenAI::Internal::Type::ArrayOf[union: -> { OpenAI::Beta::Threads::MessageContentPartParam }]
-
end
-
-
# The role of the entity that is creating the message. Allowed values include:
-
#
-
# - `user`: Indicates the message is sent by an actual user and should be used in
-
# most cases to represent user-generated messages.
-
# - `assistant`: Indicates the message is generated by the assistant. Use this
-
# value to insert messages from the assistant into the conversation.
-
#
-
# @see OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message#role
-
1
module Role
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
USER = :user
-
1
ASSISTANT = :assistant
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
1
class Attachment < OpenAI::Internal::Type::BaseModel
-
# @!attribute file_id
-
# The ID of the file to attach to the message.
-
#
-
# @return [String, nil]
-
1
optional :file_id, String
-
-
# @!attribute tools
-
# The tools to add this file to.
-
#
-
# @return [Array<OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::Tool::FileSearch>, nil]
-
1
optional :tools,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::Tool]
-
}
-
-
# @!method initialize(file_id: nil, tools: nil)
-
# @param file_id [String] The ID of the file to attach to the message.
-
#
-
# @param tools [Array<OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::Tool::FileSearch>] The tools to add this file to.
-
-
1
module Tool
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
1
variant :code_interpreter, -> { OpenAI::Beta::CodeInterpreterTool }
-
-
1
variant :file_search,
-
-> {
-
OpenAI::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::Tool::FileSearch
-
}
-
-
1
class FileSearch < OpenAI::Internal::Type::BaseModel
-
# @!attribute type
-
# The type of tool being defined: `file_search`
-
#
-
# @return [Symbol, :file_search]
-
1
required :type, const: :file_search
-
-
# @!method initialize(type: :file_search)
-
# @param type [Symbol, :file_search] The type of tool being defined: `file_search`
-
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::Message::Attachment::Tool::FileSearch)]
-
end
-
end
-
end
-
-
# @see OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread#tool_resources
-
1
class ToolResources < OpenAI::Internal::Type::BaseModel
-
# @!attribute code_interpreter
-
#
-
# @return [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::CodeInterpreter, nil]
-
1
optional :code_interpreter,
-
-> { OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::CodeInterpreter }
-
-
# @!attribute file_search
-
#
-
# @return [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch, nil]
-
1
optional :file_search,
-
-> {
-
OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch
-
}
-
-
# @!method initialize(code_interpreter: nil, file_search: nil)
-
# A set of resources that are made available to the assistant's tools in this
-
# thread. The resources are specific to the type of tool. For example, the
-
# `code_interpreter` tool requires a list of file IDs, while the `file_search`
-
# tool requires a list of vector store IDs.
-
#
-
# @param code_interpreter [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::CodeInterpreter]
-
# @param file_search [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch]
-
-
# @see OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources#code_interpreter
-
1
class CodeInterpreter < OpenAI::Internal::Type::BaseModel
-
# @!attribute file_ids
-
# A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
-
# available to the `code_interpreter` tool. There can be a maximum of 20 files
-
# associated with the tool.
-
#
-
# @return [Array<String>, nil]
-
1
optional :file_ids, OpenAI::Internal::Type::ArrayOf[String]
-
-
# @!method initialize(file_ids: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::CodeInterpreter}
-
# for more details.
-
#
-
# @param file_ids [Array<String>] A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
-
end
-
-
# @see OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources#file_search
-
1
class FileSearch < OpenAI::Internal::Type::BaseModel
-
# @!attribute vector_store_ids
-
# The
-
# [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
-
# attached to this thread. There can be a maximum of 1 vector store attached to
-
# the thread.
-
#
-
# @return [Array<String>, nil]
-
1
optional :vector_store_ids, OpenAI::Internal::Type::ArrayOf[String]
-
-
# @!attribute vector_stores
-
# A helper to create a
-
# [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
-
# with file_ids and attach it to this thread. There can be a maximum of 1 vector
-
# store attached to the thread.
-
#
-
# @return [Array<OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore>, nil]
-
1
optional :vector_stores,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore]
-
}
-
-
# @!method initialize(vector_store_ids: nil, vector_stores: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch}
-
# for more details.
-
#
-
# @param vector_store_ids [Array<String>] The [vector store](https://platform.openai.com/docs/api-reference/vector-stores/
-
#
-
# @param vector_stores [Array<OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore>] A helper to create a [vector store](https://platform.openai.com/docs/api-referen
-
-
1
class VectorStore < OpenAI::Internal::Type::BaseModel
-
# @!attribute chunking_strategy
-
# The chunking strategy used to chunk the file(s). If not set, will use the `auto`
-
# strategy.
-
#
-
# @return [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static, nil]
-
1
optional :chunking_strategy,
-
union: -> {
-
OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy
-
}
-
-
# @!attribute file_ids
-
# A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
-
# add to the vector store. There can be a maximum of 10000 files in a vector
-
# store.
-
#
-
# @return [Array<String>, nil]
-
1
optional :file_ids, OpenAI::Internal::Type::ArrayOf[String]
-
-
# @!attribute metadata
-
# Set of 16 key-value pairs that can be attached to an object. This can be useful
-
# for storing additional information about the object in a structured format, and
-
# querying for objects via API or the dashboard.
-
#
-
# Keys are strings with a maximum length of 64 characters. Values are strings with
-
# a maximum length of 512 characters.
-
#
-
# @return [Hash{Symbol=>String}, nil]
-
1
optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
-
-
# @!method initialize(chunking_strategy: nil, file_ids: nil, metadata: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore}
-
# for more details.
-
#
-
# @param chunking_strategy [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static] The chunking strategy used to chunk the file(s). If not set, will use the `auto`
-
#
-
# @param file_ids [Array<String>] A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to ad
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
-
# The chunking strategy used to chunk the file(s). If not set, will use the `auto`
-
# strategy.
-
#
-
# @see OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore#chunking_strategy
-
1
module ChunkingStrategy
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
# The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`.
-
1
variant :auto,
-
-> {
-
OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto
-
}
-
-
1
variant :static,
-
-> {
-
OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static
-
}
-
-
1
class Auto < OpenAI::Internal::Type::BaseModel
-
# @!attribute type
-
# Always `auto`.
-
#
-
# @return [Symbol, :auto]
-
1
required :type, const: :auto
-
-
# @!method initialize(type: :auto)
-
# The default strategy. This strategy currently uses a `max_chunk_size_tokens` of
-
# `800` and `chunk_overlap_tokens` of `400`.
-
#
-
# @param type [Symbol, :auto] Always `auto`.
-
end
-
-
1
class Static < OpenAI::Internal::Type::BaseModel
-
# @!attribute static
-
#
-
# @return [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static]
-
1
required :static,
-
-> {
-
OpenAI::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static
-
}
-
-
# @!attribute type
-
# Always `static`.
-
#
-
# @return [Symbol, :static]
-
1
required :type, const: :static
-
-
# @!method initialize(static:, type: :static)
-
# @param static [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static]
-
#
-
# @param type [Symbol, :static] Always `static`.
-
-
# @see OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static#static
-
1
class Static < OpenAI::Internal::Type::BaseModel
-
# @!attribute chunk_overlap_tokens
-
# The number of tokens that overlap between chunks. The default value is `400`.
-
#
-
# Note that the overlap must not exceed half of `max_chunk_size_tokens`.
-
#
-
# @return [Integer]
-
1
required :chunk_overlap_tokens, Integer
-
-
# @!attribute max_chunk_size_tokens
-
# The maximum number of tokens in each chunk. The default value is `800`. The
-
# minimum value is `100` and the maximum value is `4096`.
-
#
-
# @return [Integer]
-
1
required :max_chunk_size_tokens, Integer
-
-
# @!method initialize(chunk_overlap_tokens:, max_chunk_size_tokens:)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static}
-
# for more details.
-
#
-
# @param chunk_overlap_tokens [Integer] The number of tokens that overlap between chunks. The default value is `400`.
-
#
-
# @param max_chunk_size_tokens [Integer] The maximum number of tokens in each chunk. The default value is `800`. The mini
-
end
-
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static)]
-
end
-
end
-
end
-
end
-
end
-
-
1
class ToolResources < OpenAI::Internal::Type::BaseModel
-
# @!attribute code_interpreter
-
#
-
# @return [OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::CodeInterpreter, nil]
-
1
optional :code_interpreter,
-
-> {
-
OpenAI::Beta::ThreadCreateAndRunParams::ToolResources::CodeInterpreter
-
}
-
-
# @!attribute file_search
-
#
-
# @return [OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::FileSearch, nil]
-
1
optional :file_search, -> { OpenAI::Beta::ThreadCreateAndRunParams::ToolResources::FileSearch }
-
-
# @!method initialize(code_interpreter: nil, file_search: nil)
-
# A set of resources that are used by the assistant's tools. The resources are
-
# specific to the type of tool. For example, the `code_interpreter` tool requires
-
# a list of file IDs, while the `file_search` tool requires a list of vector store
-
# IDs.
-
#
-
# @param code_interpreter [OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::CodeInterpreter]
-
# @param file_search [OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::FileSearch]
-
-
# @see OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources#code_interpreter
-
1
class CodeInterpreter < OpenAI::Internal::Type::BaseModel
-
# @!attribute file_ids
-
# A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
-
# available to the `code_interpreter` tool. There can be a maximum of 20 files
-
# associated with the tool.
-
#
-
# @return [Array<String>, nil]
-
1
optional :file_ids, OpenAI::Internal::Type::ArrayOf[String]
-
-
# @!method initialize(file_ids: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::CodeInterpreter}
-
# for more details.
-
#
-
# @param file_ids [Array<String>] A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
-
end
-
-
# @see OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources#file_search
-
1
class FileSearch < OpenAI::Internal::Type::BaseModel
-
# @!attribute vector_store_ids
-
# The ID of the
-
# [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
-
# attached to this assistant. There can be a maximum of 1 vector store attached to
-
# the assistant.
-
#
-
# @return [Array<String>, nil]
-
1
optional :vector_store_ids, OpenAI::Internal::Type::ArrayOf[String]
-
-
# @!method initialize(vector_store_ids: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources::FileSearch} for
-
# more details.
-
#
-
# @param vector_store_ids [Array<String>] The ID of the [vector store](https://platform.openai.com/docs/api-reference/vect
-
end
-
end
-
-
1
class TruncationStrategy < OpenAI::Internal::Type::BaseModel
-
# @!attribute type
-
# The truncation strategy to use for the thread. The default is `auto`. If set to
-
# `last_messages`, the thread will be truncated to the n most recent messages in
-
# the thread. When set to `auto`, messages in the middle of the thread will be
-
# dropped to fit the context length of the model, `max_prompt_tokens`.
-
#
-
# @return [Symbol, OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy::Type]
-
1
required :type, enum: -> { OpenAI::Beta::ThreadCreateAndRunParams::TruncationStrategy::Type }
-
-
# @!attribute last_messages
-
# The number of most recent messages from the thread when constructing the context
-
# for the run.
-
#
-
# @return [Integer, nil]
-
1
optional :last_messages, Integer, nil?: true
-
-
# @!method initialize(type:, last_messages: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy} for more
-
# details.
-
#
-
# Controls for how a thread will be truncated prior to the run. Use this to
-
# control the intial context window of the run.
-
#
-
# @param type [Symbol, OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy::Type] The truncation strategy to use for the thread. The default is `auto`. If set to
-
#
-
# @param last_messages [Integer, nil] The number of most recent messages from the thread when constructing the context
-
-
# The truncation strategy to use for the thread. The default is `auto`. If set to
-
# `last_messages`, the thread will be truncated to the n most recent messages in
-
# the thread. When set to `auto`, messages in the middle of the thread will be
-
# dropped to fit the context length of the model, `max_prompt_tokens`.
-
#
-
# @see OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy#type
-
1
module Type
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
AUTO = :auto
-
1
LAST_MESSAGES = :last_messages
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
# @see OpenAI::Resources::Beta::Threads#create
-
1
class ThreadCreateParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!attribute messages
-
# A list of [messages](https://platform.openai.com/docs/api-reference/messages) to
-
# start the thread with.
-
#
-
# @return [Array<OpenAI::Models::Beta::ThreadCreateParams::Message>, nil]
-
1
optional :messages, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Beta::ThreadCreateParams::Message] }
-
-
# @!attribute metadata
-
# Set of 16 key-value pairs that can be attached to an object. This can be useful
-
# for storing additional information about the object in a structured format, and
-
# querying for objects via API or the dashboard.
-
#
-
# Keys are strings with a maximum length of 64 characters. Values are strings with
-
# a maximum length of 512 characters.
-
#
-
# @return [Hash{Symbol=>String}, nil]
-
1
optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
-
-
# @!attribute tool_resources
-
# A set of resources that are made available to the assistant's tools in this
-
# thread. The resources are specific to the type of tool. For example, the
-
# `code_interpreter` tool requires a list of file IDs, while the `file_search`
-
# tool requires a list of vector store IDs.
-
#
-
# @return [OpenAI::Models::Beta::ThreadCreateParams::ToolResources, nil]
-
1
optional :tool_resources, -> { OpenAI::Beta::ThreadCreateParams::ToolResources }, nil?: true
-
-
# @!method initialize(messages: nil, metadata: nil, tool_resources: nil, request_options: {})
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::ThreadCreateParams} for more details.
-
#
-
# @param messages [Array<OpenAI::Models::Beta::ThreadCreateParams::Message>] A list of [messages](https://platform.openai.com/docs/api-reference/messages) to
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
#
-
# @param tool_resources [OpenAI::Models::Beta::ThreadCreateParams::ToolResources, nil] A set of resources that are made available to the assistant's tools in this thre
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
-
1
class Message < OpenAI::Internal::Type::BaseModel
-
# @!attribute content
-
# The text contents of the message.
-
#
-
# @return [String, Array<OpenAI::Models::Beta::Threads::ImageFileContentBlock, OpenAI::Models::Beta::Threads::ImageURLContentBlock, OpenAI::Models::Beta::Threads::TextContentBlockParam>]
-
1
required :content, union: -> { OpenAI::Beta::ThreadCreateParams::Message::Content }
-
-
# @!attribute role
-
# The role of the entity that is creating the message. Allowed values include:
-
#
-
# - `user`: Indicates the message is sent by an actual user and should be used in
-
# most cases to represent user-generated messages.
-
# - `assistant`: Indicates the message is generated by the assistant. Use this
-
# value to insert messages from the assistant into the conversation.
-
#
-
# @return [Symbol, OpenAI::Models::Beta::ThreadCreateParams::Message::Role]
-
1
required :role, enum: -> { OpenAI::Beta::ThreadCreateParams::Message::Role }
-
-
# @!attribute attachments
-
# A list of files attached to the message, and the tools they should be added to.
-
#
-
# @return [Array<OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment>, nil]
-
1
optional :attachments,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[OpenAI::Beta::ThreadCreateParams::Message::Attachment]
-
},
-
nil?: true
-
-
# @!attribute metadata
-
# Set of 16 key-value pairs that can be attached to an object. This can be useful
-
# for storing additional information about the object in a structured format, and
-
# querying for objects via API or the dashboard.
-
#
-
# Keys are strings with a maximum length of 64 characters. Values are strings with
-
# a maximum length of 512 characters.
-
#
-
# @return [Hash{Symbol=>String}, nil]
-
1
optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
-
-
# @!method initialize(content:, role:, attachments: nil, metadata: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::ThreadCreateParams::Message} for more details.
-
#
-
# @param content [String, Array<OpenAI::Models::Beta::Threads::ImageFileContentBlock, OpenAI::Models::Beta::Threads::ImageURLContentBlock, OpenAI::Models::Beta::Threads::TextContentBlockParam>] The text contents of the message.
-
#
-
# @param role [Symbol, OpenAI::Models::Beta::ThreadCreateParams::Message::Role] The role of the entity that is creating the message. Allowed values include:
-
#
-
# @param attachments [Array<OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment>, nil] A list of files attached to the message, and the tools they should be added to.
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
-
# The text contents of the message.
-
#
-
# @see OpenAI::Models::Beta::ThreadCreateParams::Message#content
-
1
module Content
-
1
extend OpenAI::Internal::Type::Union
-
-
# The text contents of the message.
-
1
variant String
-
-
# An array of content parts with a defined type, each can be of type `text` or images can be passed with `image_url` or `image_file`. Image types are only supported on [Vision-compatible models](https://platform.openai.com/docs/models).
-
1
variant -> { OpenAI::Models::Beta::ThreadCreateParams::Message::Content::MessageContentPartParamArray }
-
-
# @!method self.variants
-
# @return [Array(String, Array<OpenAI::Models::Beta::Threads::ImageFileContentBlock, OpenAI::Models::Beta::Threads::ImageURLContentBlock, OpenAI::Models::Beta::Threads::TextContentBlockParam>)]
-
-
# @type [OpenAI::Internal::Type::Converter]
-
MessageContentPartParamArray =
-
1
OpenAI::Internal::Type::ArrayOf[union: -> { OpenAI::Beta::Threads::MessageContentPartParam }]
-
end
-
-
# The role of the entity that is creating the message. Allowed values include:
-
#
-
# - `user`: Indicates the message is sent by an actual user and should be used in
-
# most cases to represent user-generated messages.
-
# - `assistant`: Indicates the message is generated by the assistant. Use this
-
# value to insert messages from the assistant into the conversation.
-
#
-
# @see OpenAI::Models::Beta::ThreadCreateParams::Message#role
-
1
module Role
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
USER = :user
-
1
ASSISTANT = :assistant
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
1
class Attachment < OpenAI::Internal::Type::BaseModel
-
# @!attribute file_id
-
# The ID of the file to attach to the message.
-
#
-
# @return [String, nil]
-
1
optional :file_id, String
-
-
# @!attribute tools
-
# The tools to add this file to.
-
#
-
# @return [Array<OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment::Tool::FileSearch>, nil]
-
1
optional :tools,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::ThreadCreateParams::Message::Attachment::Tool]
-
}
-
-
# @!method initialize(file_id: nil, tools: nil)
-
# @param file_id [String] The ID of the file to attach to the message.
-
#
-
# @param tools [Array<OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment::Tool::FileSearch>] The tools to add this file to.
-
-
1
module Tool
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
1
variant :code_interpreter, -> { OpenAI::Beta::CodeInterpreterTool }
-
-
1
variant :file_search,
-
-> {
-
OpenAI::Beta::ThreadCreateParams::Message::Attachment::Tool::FileSearch
-
}
-
-
1
class FileSearch < OpenAI::Internal::Type::BaseModel
-
# @!attribute type
-
# The type of tool being defined: `file_search`
-
#
-
# @return [Symbol, :file_search]
-
1
required :type, const: :file_search
-
-
# @!method initialize(type: :file_search)
-
# @param type [Symbol, :file_search] The type of tool being defined: `file_search`
-
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::ThreadCreateParams::Message::Attachment::Tool::FileSearch)]
-
end
-
end
-
end
-
-
1
class ToolResources < OpenAI::Internal::Type::BaseModel
-
# @!attribute code_interpreter
-
#
-
# @return [OpenAI::Models::Beta::ThreadCreateParams::ToolResources::CodeInterpreter, nil]
-
1
optional :code_interpreter, -> { OpenAI::Beta::ThreadCreateParams::ToolResources::CodeInterpreter }
-
-
# @!attribute file_search
-
#
-
# @return [OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch, nil]
-
1
optional :file_search, -> { OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch }
-
-
# @!method initialize(code_interpreter: nil, file_search: nil)
-
# A set of resources that are made available to the assistant's tools in this
-
# thread. The resources are specific to the type of tool. For example, the
-
# `code_interpreter` tool requires a list of file IDs, while the `file_search`
-
# tool requires a list of vector store IDs.
-
#
-
# @param code_interpreter [OpenAI::Models::Beta::ThreadCreateParams::ToolResources::CodeInterpreter]
-
# @param file_search [OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch]
-
-
# @see OpenAI::Models::Beta::ThreadCreateParams::ToolResources#code_interpreter
-
1
class CodeInterpreter < OpenAI::Internal::Type::BaseModel
-
# @!attribute file_ids
-
# A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
-
# available to the `code_interpreter` tool. There can be a maximum of 20 files
-
# associated with the tool.
-
#
-
# @return [Array<String>, nil]
-
1
optional :file_ids, OpenAI::Internal::Type::ArrayOf[String]
-
-
# @!method initialize(file_ids: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::ThreadCreateParams::ToolResources::CodeInterpreter} for
-
# more details.
-
#
-
# @param file_ids [Array<String>] A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
-
end
-
-
# @see OpenAI::Models::Beta::ThreadCreateParams::ToolResources#file_search
-
1
class FileSearch < OpenAI::Internal::Type::BaseModel
-
# @!attribute vector_store_ids
-
# The
-
# [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
-
# attached to this thread. There can be a maximum of 1 vector store attached to
-
# the thread.
-
#
-
# @return [Array<String>, nil]
-
1
optional :vector_store_ids, OpenAI::Internal::Type::ArrayOf[String]
-
-
# @!attribute vector_stores
-
# A helper to create a
-
# [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
-
# with file_ids and attach it to this thread. There can be a maximum of 1 vector
-
# store attached to the thread.
-
#
-
# @return [Array<OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore>, nil]
-
1
optional :vector_stores,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore]
-
}
-
-
# @!method initialize(vector_store_ids: nil, vector_stores: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch} for more
-
# details.
-
#
-
# @param vector_store_ids [Array<String>] The [vector store](https://platform.openai.com/docs/api-reference/vector-stores/
-
#
-
# @param vector_stores [Array<OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore>] A helper to create a [vector store](https://platform.openai.com/docs/api-referen
-
-
1
class VectorStore < OpenAI::Internal::Type::BaseModel
-
# @!attribute chunking_strategy
-
# The chunking strategy used to chunk the file(s). If not set, will use the `auto`
-
# strategy.
-
#
-
# @return [OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static, nil]
-
1
optional :chunking_strategy,
-
union: -> {
-
OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy
-
}
-
-
# @!attribute file_ids
-
# A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
-
# add to the vector store. There can be a maximum of 10000 files in a vector
-
# store.
-
#
-
# @return [Array<String>, nil]
-
1
optional :file_ids, OpenAI::Internal::Type::ArrayOf[String]
-
-
# @!attribute metadata
-
# Set of 16 key-value pairs that can be attached to an object. This can be useful
-
# for storing additional information about the object in a structured format, and
-
# querying for objects via API or the dashboard.
-
#
-
# Keys are strings with a maximum length of 64 characters. Values are strings with
-
# a maximum length of 512 characters.
-
#
-
# @return [Hash{Symbol=>String}, nil]
-
1
optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
-
-
# @!method initialize(chunking_strategy: nil, file_ids: nil, metadata: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore}
-
# for more details.
-
#
-
# @param chunking_strategy [OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static] The chunking strategy used to chunk the file(s). If not set, will use the `auto`
-
#
-
# @param file_ids [Array<String>] A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to ad
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
-
# The chunking strategy used to chunk the file(s). If not set, will use the `auto`
-
# strategy.
-
#
-
# @see OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore#chunking_strategy
-
1
module ChunkingStrategy
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
# The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`.
-
1
variant :auto,
-
-> {
-
OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto
-
}
-
-
1
variant :static,
-
-> {
-
OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static
-
}
-
-
1
class Auto < OpenAI::Internal::Type::BaseModel
-
# @!attribute type
-
# Always `auto`.
-
#
-
# @return [Symbol, :auto]
-
1
required :type, const: :auto
-
-
# @!method initialize(type: :auto)
-
# The default strategy. This strategy currently uses a `max_chunk_size_tokens` of
-
# `800` and `chunk_overlap_tokens` of `400`.
-
#
-
# @param type [Symbol, :auto] Always `auto`.
-
end
-
-
1
class Static < OpenAI::Internal::Type::BaseModel
-
# @!attribute static
-
#
-
# @return [OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static]
-
1
required :static,
-
-> {
-
OpenAI::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static
-
}
-
-
# @!attribute type
-
# Always `static`.
-
#
-
# @return [Symbol, :static]
-
1
required :type, const: :static
-
-
# @!method initialize(static:, type: :static)
-
# @param static [OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static]
-
#
-
# @param type [Symbol, :static] Always `static`.
-
-
# @see OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static#static
-
1
class Static < OpenAI::Internal::Type::BaseModel
-
# @!attribute chunk_overlap_tokens
-
# The number of tokens that overlap between chunks. The default value is `400`.
-
#
-
# Note that the overlap must not exceed half of `max_chunk_size_tokens`.
-
#
-
# @return [Integer]
-
1
required :chunk_overlap_tokens, Integer
-
-
# @!attribute max_chunk_size_tokens
-
# The maximum number of tokens in each chunk. The default value is `800`. The
-
# minimum value is `100` and the maximum value is `4096`.
-
#
-
# @return [Integer]
-
1
required :max_chunk_size_tokens, Integer
-
-
# @!method initialize(chunk_overlap_tokens:, max_chunk_size_tokens:)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static::Static}
-
# for more details.
-
#
-
# @param chunk_overlap_tokens [Integer] The number of tokens that overlap between chunks. The default value is `400`.
-
#
-
# @param max_chunk_size_tokens [Integer] The maximum number of tokens in each chunk. The default value is `800`. The mini
-
end
-
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Auto, OpenAI::Models::Beta::ThreadCreateParams::ToolResources::FileSearch::VectorStore::ChunkingStrategy::Static)]
-
end
-
end
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
# @see OpenAI::Resources::Beta::Threads#delete
-
1
class ThreadDeleteParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!method initialize(request_options: {})
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
# @see OpenAI::Resources::Beta::Threads#delete
-
1
class ThreadDeleted < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute deleted
-
#
-
# @return [Boolean]
-
1
required :deleted, OpenAI::Internal::Type::Boolean
-
-
# @!attribute object
-
#
-
# @return [Symbol, :"thread.deleted"]
-
1
required :object, const: :"thread.deleted"
-
-
# @!method initialize(id:, deleted:, object: :"thread.deleted")
-
# @param id [String]
-
# @param deleted [Boolean]
-
# @param object [Symbol, :"thread.deleted"]
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
# @see OpenAI::Resources::Beta::Threads#retrieve
-
1
class ThreadRetrieveParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!method initialize(request_options: {})
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
class ThreadStreamEvent < OpenAI::Internal::Type::BaseModel
-
# @!attribute data
-
# Represents a thread that contains
-
# [messages](https://platform.openai.com/docs/api-reference/messages).
-
#
-
# @return [OpenAI::Models::Beta::Thread]
-
1
required :data, -> { OpenAI::Beta::Thread }
-
-
# @!attribute event
-
#
-
# @return [Symbol, :"thread.created"]
-
1
required :event, const: :"thread.created"
-
-
# @!attribute enabled
-
# Whether to enable input audio transcription.
-
#
-
# @return [Boolean, nil]
-
1
optional :enabled, OpenAI::Internal::Type::Boolean
-
-
# @!method initialize(data:, enabled: nil, event: :"thread.created")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::ThreadStreamEvent} for more details.
-
#
-
# Occurs when a new
-
# [thread](https://platform.openai.com/docs/api-reference/threads/object) is
-
# created.
-
#
-
# @param data [OpenAI::Models::Beta::Thread] Represents a thread that contains [messages](https://platform.openai.com/docs/ap
-
#
-
# @param enabled [Boolean] Whether to enable input audio transcription.
-
#
-
# @param event [Symbol, :"thread.created"]
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
# @see OpenAI::Resources::Beta::Threads#update
-
1
class ThreadUpdateParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!attribute metadata
-
# Set of 16 key-value pairs that can be attached to an object. This can be useful
-
# for storing additional information about the object in a structured format, and
-
# querying for objects via API or the dashboard.
-
#
-
# Keys are strings with a maximum length of 64 characters. Values are strings with
-
# a maximum length of 512 characters.
-
#
-
# @return [Hash{Symbol=>String}, nil]
-
1
optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
-
-
# @!attribute tool_resources
-
# A set of resources that are made available to the assistant's tools in this
-
# thread. The resources are specific to the type of tool. For example, the
-
# `code_interpreter` tool requires a list of file IDs, while the `file_search`
-
# tool requires a list of vector store IDs.
-
#
-
# @return [OpenAI::Models::Beta::ThreadUpdateParams::ToolResources, nil]
-
1
optional :tool_resources, -> { OpenAI::Beta::ThreadUpdateParams::ToolResources }, nil?: true
-
-
# @!method initialize(metadata: nil, tool_resources: nil, request_options: {})
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::ThreadUpdateParams} for more details.
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
#
-
# @param tool_resources [OpenAI::Models::Beta::ThreadUpdateParams::ToolResources, nil] A set of resources that are made available to the assistant's tools in this thre
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
-
1
class ToolResources < OpenAI::Internal::Type::BaseModel
-
# @!attribute code_interpreter
-
#
-
# @return [OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::CodeInterpreter, nil]
-
1
optional :code_interpreter, -> { OpenAI::Beta::ThreadUpdateParams::ToolResources::CodeInterpreter }
-
-
# @!attribute file_search
-
#
-
# @return [OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::FileSearch, nil]
-
1
optional :file_search, -> { OpenAI::Beta::ThreadUpdateParams::ToolResources::FileSearch }
-
-
# @!method initialize(code_interpreter: nil, file_search: nil)
-
# A set of resources that are made available to the assistant's tools in this
-
# thread. The resources are specific to the type of tool. For example, the
-
# `code_interpreter` tool requires a list of file IDs, while the `file_search`
-
# tool requires a list of vector store IDs.
-
#
-
# @param code_interpreter [OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::CodeInterpreter]
-
# @param file_search [OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::FileSearch]
-
-
# @see OpenAI::Models::Beta::ThreadUpdateParams::ToolResources#code_interpreter
-
1
class CodeInterpreter < OpenAI::Internal::Type::BaseModel
-
# @!attribute file_ids
-
# A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
-
# available to the `code_interpreter` tool. There can be a maximum of 20 files
-
# associated with the tool.
-
#
-
# @return [Array<String>, nil]
-
1
optional :file_ids, OpenAI::Internal::Type::ArrayOf[String]
-
-
# @!method initialize(file_ids: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::CodeInterpreter} for
-
# more details.
-
#
-
# @param file_ids [Array<String>] A list of [file](https://platform.openai.com/docs/api-reference/files) IDs made
-
end
-
-
# @see OpenAI::Models::Beta::ThreadUpdateParams::ToolResources#file_search
-
1
class FileSearch < OpenAI::Internal::Type::BaseModel
-
# @!attribute vector_store_ids
-
# The
-
# [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
-
# attached to this thread. There can be a maximum of 1 vector store attached to
-
# the thread.
-
#
-
# @return [Array<String>, nil]
-
1
optional :vector_store_ids, OpenAI::Internal::Type::ArrayOf[String]
-
-
# @!method initialize(vector_store_ids: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::ThreadUpdateParams::ToolResources::FileSearch} for more
-
# details.
-
#
-
# @param vector_store_ids [Array<String>] The [vector store](https://platform.openai.com/docs/api-reference/vector-stores/
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
# A citation within the message that points to a specific quote from a specific
-
# File associated with the assistant or the message. Generated when the assistant
-
# uses the "file_search" tool to search files.
-
1
module Annotation
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
# A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files.
-
1
variant :file_citation, -> { OpenAI::Beta::Threads::FileCitationAnnotation }
-
-
# A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file.
-
1
variant :file_path, -> { OpenAI::Beta::Threads::FilePathAnnotation }
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Beta::Threads::FileCitationAnnotation, OpenAI::Models::Beta::Threads::FilePathAnnotation)]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
# A citation within the message that points to a specific quote from a specific
-
# File associated with the assistant or the message. Generated when the assistant
-
# uses the "file_search" tool to search files.
-
1
module AnnotationDelta
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
# A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files.
-
1
variant :file_citation, -> { OpenAI::Beta::Threads::FileCitationDeltaAnnotation }
-
-
# A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file.
-
1
variant :file_path, -> { OpenAI::Beta::Threads::FilePathDeltaAnnotation }
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Beta::Threads::FileCitationDeltaAnnotation, OpenAI::Models::Beta::Threads::FilePathDeltaAnnotation)]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
1
class FileCitationAnnotation < OpenAI::Internal::Type::BaseModel
-
# @!attribute end_index
-
#
-
# @return [Integer]
-
1
required :end_index, Integer
-
-
# @!attribute file_citation
-
#
-
# @return [OpenAI::Models::Beta::Threads::FileCitationAnnotation::FileCitation]
-
1
required :file_citation, -> { OpenAI::Beta::Threads::FileCitationAnnotation::FileCitation }
-
-
# @!attribute start_index
-
#
-
# @return [Integer]
-
1
required :start_index, Integer
-
-
# @!attribute text
-
# The text in the message content that needs to be replaced.
-
#
-
# @return [String]
-
1
required :text, String
-
-
# @!attribute type
-
# Always `file_citation`.
-
#
-
# @return [Symbol, :file_citation]
-
1
required :type, const: :file_citation
-
-
# @!method initialize(end_index:, file_citation:, start_index:, text:, type: :file_citation)
-
# A citation within the message that points to a specific quote from a specific
-
# File associated with the assistant or the message. Generated when the assistant
-
# uses the "file_search" tool to search files.
-
#
-
# @param end_index [Integer]
-
#
-
# @param file_citation [OpenAI::Models::Beta::Threads::FileCitationAnnotation::FileCitation]
-
#
-
# @param start_index [Integer]
-
#
-
# @param text [String] The text in the message content that needs to be replaced.
-
#
-
# @param type [Symbol, :file_citation] Always `file_citation`.
-
-
# @see OpenAI::Models::Beta::Threads::FileCitationAnnotation#file_citation
-
1
class FileCitation < OpenAI::Internal::Type::BaseModel
-
# @!attribute file_id
-
# The ID of the specific File the citation is from.
-
#
-
# @return [String]
-
1
required :file_id, String
-
-
# @!method initialize(file_id:)
-
# @param file_id [String] The ID of the specific File the citation is from.
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
1
class FileCitationDeltaAnnotation < OpenAI::Internal::Type::BaseModel
-
# @!attribute index
-
# The index of the annotation in the text content part.
-
#
-
# @return [Integer]
-
1
required :index, Integer
-
-
# @!attribute type
-
# Always `file_citation`.
-
#
-
# @return [Symbol, :file_citation]
-
1
required :type, const: :file_citation
-
-
# @!attribute end_index
-
#
-
# @return [Integer, nil]
-
1
optional :end_index, Integer
-
-
# @!attribute file_citation
-
#
-
# @return [OpenAI::Models::Beta::Threads::FileCitationDeltaAnnotation::FileCitation, nil]
-
1
optional :file_citation, -> { OpenAI::Beta::Threads::FileCitationDeltaAnnotation::FileCitation }
-
-
# @!attribute start_index
-
#
-
# @return [Integer, nil]
-
1
optional :start_index, Integer
-
-
# @!attribute text
-
# The text in the message content that needs to be replaced.
-
#
-
# @return [String, nil]
-
1
optional :text, String
-
-
# @!method initialize(index:, end_index: nil, file_citation: nil, start_index: nil, text: nil, type: :file_citation)
-
# A citation within the message that points to a specific quote from a specific
-
# File associated with the assistant or the message. Generated when the assistant
-
# uses the "file_search" tool to search files.
-
#
-
# @param index [Integer] The index of the annotation in the text content part.
-
#
-
# @param end_index [Integer]
-
#
-
# @param file_citation [OpenAI::Models::Beta::Threads::FileCitationDeltaAnnotation::FileCitation]
-
#
-
# @param start_index [Integer]
-
#
-
# @param text [String] The text in the message content that needs to be replaced.
-
#
-
# @param type [Symbol, :file_citation] Always `file_citation`.
-
-
# @see OpenAI::Models::Beta::Threads::FileCitationDeltaAnnotation#file_citation
-
1
class FileCitation < OpenAI::Internal::Type::BaseModel
-
# @!attribute file_id
-
# The ID of the specific File the citation is from.
-
#
-
# @return [String, nil]
-
1
optional :file_id, String
-
-
# @!attribute quote
-
# The specific quote in the file.
-
#
-
# @return [String, nil]
-
1
optional :quote, String
-
-
# @!method initialize(file_id: nil, quote: nil)
-
# @param file_id [String] The ID of the specific File the citation is from.
-
#
-
# @param quote [String] The specific quote in the file.
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
1
class FilePathAnnotation < OpenAI::Internal::Type::BaseModel
-
# @!attribute end_index
-
#
-
# @return [Integer]
-
1
required :end_index, Integer
-
-
# @!attribute file_path
-
#
-
# @return [OpenAI::Models::Beta::Threads::FilePathAnnotation::FilePath]
-
1
required :file_path, -> { OpenAI::Beta::Threads::FilePathAnnotation::FilePath }
-
-
# @!attribute start_index
-
#
-
# @return [Integer]
-
1
required :start_index, Integer
-
-
# @!attribute text
-
# The text in the message content that needs to be replaced.
-
#
-
# @return [String]
-
1
required :text, String
-
-
# @!attribute type
-
# Always `file_path`.
-
#
-
# @return [Symbol, :file_path]
-
1
required :type, const: :file_path
-
-
# @!method initialize(end_index:, file_path:, start_index:, text:, type: :file_path)
-
# A URL for the file that's generated when the assistant used the
-
# `code_interpreter` tool to generate a file.
-
#
-
# @param end_index [Integer]
-
#
-
# @param file_path [OpenAI::Models::Beta::Threads::FilePathAnnotation::FilePath]
-
#
-
# @param start_index [Integer]
-
#
-
# @param text [String] The text in the message content that needs to be replaced.
-
#
-
# @param type [Symbol, :file_path] Always `file_path`.
-
-
# @see OpenAI::Models::Beta::Threads::FilePathAnnotation#file_path
-
1
class FilePath < OpenAI::Internal::Type::BaseModel
-
# @!attribute file_id
-
# The ID of the file that was generated.
-
#
-
# @return [String]
-
1
required :file_id, String
-
-
# @!method initialize(file_id:)
-
# @param file_id [String] The ID of the file that was generated.
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
1
class FilePathDeltaAnnotation < OpenAI::Internal::Type::BaseModel
-
# @!attribute index
-
# The index of the annotation in the text content part.
-
#
-
# @return [Integer]
-
1
required :index, Integer
-
-
# @!attribute type
-
# Always `file_path`.
-
#
-
# @return [Symbol, :file_path]
-
1
required :type, const: :file_path
-
-
# @!attribute end_index
-
#
-
# @return [Integer, nil]
-
1
optional :end_index, Integer
-
-
# @!attribute file_path
-
#
-
# @return [OpenAI::Models::Beta::Threads::FilePathDeltaAnnotation::FilePath, nil]
-
1
optional :file_path, -> { OpenAI::Beta::Threads::FilePathDeltaAnnotation::FilePath }
-
-
# @!attribute start_index
-
#
-
# @return [Integer, nil]
-
1
optional :start_index, Integer
-
-
# @!attribute text
-
# The text in the message content that needs to be replaced.
-
#
-
# @return [String, nil]
-
1
optional :text, String
-
-
# @!method initialize(index:, end_index: nil, file_path: nil, start_index: nil, text: nil, type: :file_path)
-
# A URL for the file that's generated when the assistant used the
-
# `code_interpreter` tool to generate a file.
-
#
-
# @param index [Integer] The index of the annotation in the text content part.
-
#
-
# @param end_index [Integer]
-
#
-
# @param file_path [OpenAI::Models::Beta::Threads::FilePathDeltaAnnotation::FilePath]
-
#
-
# @param start_index [Integer]
-
#
-
# @param text [String] The text in the message content that needs to be replaced.
-
#
-
# @param type [Symbol, :file_path] Always `file_path`.
-
-
# @see OpenAI::Models::Beta::Threads::FilePathDeltaAnnotation#file_path
-
1
class FilePath < OpenAI::Internal::Type::BaseModel
-
# @!attribute file_id
-
# The ID of the file that was generated.
-
#
-
# @return [String, nil]
-
1
optional :file_id, String
-
-
# @!method initialize(file_id: nil)
-
# @param file_id [String] The ID of the file that was generated.
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
1
class ImageFile < OpenAI::Internal::Type::BaseModel
-
# @!attribute file_id
-
# The [File](https://platform.openai.com/docs/api-reference/files) ID of the image
-
# in the message content. Set `purpose="vision"` when uploading the File if you
-
# need to later display the file content.
-
#
-
# @return [String]
-
1
required :file_id, String
-
-
# @!attribute detail
-
# Specifies the detail level of the image if specified by the user. `low` uses
-
# fewer tokens, you can opt in to high resolution using `high`.
-
#
-
# @return [Symbol, OpenAI::Models::Beta::Threads::ImageFile::Detail, nil]
-
1
optional :detail, enum: -> { OpenAI::Beta::Threads::ImageFile::Detail }
-
-
# @!method initialize(file_id:, detail: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::Threads::ImageFile} for more details.
-
#
-
# @param file_id [String] The [File](https://platform.openai.com/docs/api-reference/files) ID of the image
-
#
-
# @param detail [Symbol, OpenAI::Models::Beta::Threads::ImageFile::Detail] Specifies the detail level of the image if specified by the user. `low` uses few
-
-
# Specifies the detail level of the image if specified by the user. `low` uses
-
# fewer tokens, you can opt in to high resolution using `high`.
-
#
-
# @see OpenAI::Models::Beta::Threads::ImageFile#detail
-
1
module Detail
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
AUTO = :auto
-
1
LOW = :low
-
1
HIGH = :high
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
1
class ImageFileContentBlock < OpenAI::Internal::Type::BaseModel
-
# @!attribute image_file
-
#
-
# @return [OpenAI::Models::Beta::Threads::ImageFile]
-
1
required :image_file, -> { OpenAI::Beta::Threads::ImageFile }
-
-
# @!attribute type
-
# Always `image_file`.
-
#
-
# @return [Symbol, :image_file]
-
1
required :type, const: :image_file
-
-
# @!method initialize(image_file:, type: :image_file)
-
# References an image [File](https://platform.openai.com/docs/api-reference/files)
-
# in the content of a message.
-
#
-
# @param image_file [OpenAI::Models::Beta::Threads::ImageFile]
-
#
-
# @param type [Symbol, :image_file] Always `image_file`.
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
1
class ImageFileDelta < OpenAI::Internal::Type::BaseModel
-
# @!attribute detail
-
# Specifies the detail level of the image if specified by the user. `low` uses
-
# fewer tokens, you can opt in to high resolution using `high`.
-
#
-
# @return [Symbol, OpenAI::Models::Beta::Threads::ImageFileDelta::Detail, nil]
-
1
optional :detail, enum: -> { OpenAI::Beta::Threads::ImageFileDelta::Detail }
-
-
# @!attribute file_id
-
# The [File](https://platform.openai.com/docs/api-reference/files) ID of the image
-
# in the message content. Set `purpose="vision"` when uploading the File if you
-
# need to later display the file content.
-
#
-
# @return [String, nil]
-
1
optional :file_id, String
-
-
# @!method initialize(detail: nil, file_id: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::Threads::ImageFileDelta} for more details.
-
#
-
# @param detail [Symbol, OpenAI::Models::Beta::Threads::ImageFileDelta::Detail] Specifies the detail level of the image if specified by the user. `low` uses few
-
#
-
# @param file_id [String] The [File](https://platform.openai.com/docs/api-reference/files) ID of the image
-
-
# Specifies the detail level of the image if specified by the user. `low` uses
-
# fewer tokens, you can opt in to high resolution using `high`.
-
#
-
# @see OpenAI::Models::Beta::Threads::ImageFileDelta#detail
-
1
module Detail
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
AUTO = :auto
-
1
LOW = :low
-
1
HIGH = :high
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
1
class ImageFileDeltaBlock < OpenAI::Internal::Type::BaseModel
-
# @!attribute index
-
# The index of the content part in the message.
-
#
-
# @return [Integer]
-
1
required :index, Integer
-
-
# @!attribute type
-
# Always `image_file`.
-
#
-
# @return [Symbol, :image_file]
-
1
required :type, const: :image_file
-
-
# @!attribute image_file
-
#
-
# @return [OpenAI::Models::Beta::Threads::ImageFileDelta, nil]
-
1
optional :image_file, -> { OpenAI::Beta::Threads::ImageFileDelta }
-
-
# @!method initialize(index:, image_file: nil, type: :image_file)
-
# References an image [File](https://platform.openai.com/docs/api-reference/files)
-
# in the content of a message.
-
#
-
# @param index [Integer] The index of the content part in the message.
-
#
-
# @param image_file [OpenAI::Models::Beta::Threads::ImageFileDelta]
-
#
-
# @param type [Symbol, :image_file] Always `image_file`.
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
1
class ImageURL < OpenAI::Internal::Type::BaseModel
-
# @!attribute url
-
# The external URL of the image, must be a supported image types: jpeg, jpg, png,
-
# gif, webp.
-
#
-
# @return [String]
-
1
required :url, String
-
-
# @!attribute detail
-
# Specifies the detail level of the image. `low` uses fewer tokens, you can opt in
-
# to high resolution using `high`. Default value is `auto`
-
#
-
# @return [Symbol, OpenAI::Models::Beta::Threads::ImageURL::Detail, nil]
-
1
optional :detail, enum: -> { OpenAI::Beta::Threads::ImageURL::Detail }
-
-
# @!method initialize(url:, detail: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::Threads::ImageURL} for more details.
-
#
-
# @param url [String] The external URL of the image, must be a supported image types: jpeg, jpg, png,
-
#
-
# @param detail [Symbol, OpenAI::Models::Beta::Threads::ImageURL::Detail] Specifies the detail level of the image. `low` uses fewer tokens, you can opt in
-
-
# Specifies the detail level of the image. `low` uses fewer tokens, you can opt in
-
# to high resolution using `high`. Default value is `auto`
-
#
-
# @see OpenAI::Models::Beta::Threads::ImageURL#detail
-
1
module Detail
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
AUTO = :auto
-
1
LOW = :low
-
1
HIGH = :high
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
1
class ImageURLContentBlock < OpenAI::Internal::Type::BaseModel
-
# @!attribute image_url
-
#
-
# @return [OpenAI::Models::Beta::Threads::ImageURL]
-
1
required :image_url, -> { OpenAI::Beta::Threads::ImageURL }
-
-
# @!attribute type
-
# The type of the content part.
-
#
-
# @return [Symbol, :image_url]
-
1
required :type, const: :image_url
-
-
# @!method initialize(image_url:, type: :image_url)
-
# References an image URL in the content of a message.
-
#
-
# @param image_url [OpenAI::Models::Beta::Threads::ImageURL]
-
#
-
# @param type [Symbol, :image_url] The type of the content part.
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
1
class ImageURLDelta < OpenAI::Internal::Type::BaseModel
-
# @!attribute detail
-
# Specifies the detail level of the image. `low` uses fewer tokens, you can opt in
-
# to high resolution using `high`.
-
#
-
# @return [Symbol, OpenAI::Models::Beta::Threads::ImageURLDelta::Detail, nil]
-
1
optional :detail, enum: -> { OpenAI::Beta::Threads::ImageURLDelta::Detail }
-
-
# @!attribute url
-
# The URL of the image, must be a supported image types: jpeg, jpg, png, gif,
-
# webp.
-
#
-
# @return [String, nil]
-
1
optional :url, String
-
-
# @!method initialize(detail: nil, url: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::Threads::ImageURLDelta} for more details.
-
#
-
# @param detail [Symbol, OpenAI::Models::Beta::Threads::ImageURLDelta::Detail] Specifies the detail level of the image. `low` uses fewer tokens, you can opt in
-
#
-
# @param url [String] The URL of the image, must be a supported image types: jpeg, jpg, png, gif, webp
-
-
# Specifies the detail level of the image. `low` uses fewer tokens, you can opt in
-
# to high resolution using `high`.
-
#
-
# @see OpenAI::Models::Beta::Threads::ImageURLDelta#detail
-
1
module Detail
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
AUTO = :auto
-
1
LOW = :low
-
1
HIGH = :high
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
1
class ImageURLDeltaBlock < OpenAI::Internal::Type::BaseModel
-
# @!attribute index
-
# The index of the content part in the message.
-
#
-
# @return [Integer]
-
1
required :index, Integer
-
-
# @!attribute type
-
# Always `image_url`.
-
#
-
# @return [Symbol, :image_url]
-
1
required :type, const: :image_url
-
-
# @!attribute image_url
-
#
-
# @return [OpenAI::Models::Beta::Threads::ImageURLDelta, nil]
-
1
optional :image_url, -> { OpenAI::Beta::Threads::ImageURLDelta }
-
-
# @!method initialize(index:, image_url: nil, type: :image_url)
-
# References an image URL in the content of a message.
-
#
-
# @param index [Integer] The index of the content part in the message.
-
#
-
# @param image_url [OpenAI::Models::Beta::Threads::ImageURLDelta]
-
#
-
# @param type [Symbol, :image_url] Always `image_url`.
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
# @see OpenAI::Resources::Beta::Threads::Messages#create
-
1
class Message < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The identifier, which can be referenced in API endpoints.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute assistant_id
-
# If applicable, the ID of the
-
# [assistant](https://platform.openai.com/docs/api-reference/assistants) that
-
# authored this message.
-
#
-
# @return [String, nil]
-
1
required :assistant_id, String, nil?: true
-
-
# @!attribute attachments
-
# A list of files attached to the message, and the tools they were added to.
-
#
-
# @return [Array<OpenAI::Models::Beta::Threads::Message::Attachment>, nil]
-
1
required :attachments,
-
-> { OpenAI::Internal::Type::ArrayOf[OpenAI::Beta::Threads::Message::Attachment] },
-
nil?: true
-
-
# @!attribute completed_at
-
# The Unix timestamp (in seconds) for when the message was completed.
-
#
-
# @return [Integer, nil]
-
1
required :completed_at, Integer, nil?: true
-
-
# @!attribute content
-
# The content of the message in array of text and/or images.
-
#
-
# @return [Array<OpenAI::Models::Beta::Threads::ImageFileContentBlock, OpenAI::Models::Beta::Threads::ImageURLContentBlock, OpenAI::Models::Beta::Threads::TextContentBlock, OpenAI::Models::Beta::Threads::RefusalContentBlock>]
-
1
required :content,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::Threads::MessageContent]
-
}
-
-
# @!attribute created_at
-
# The Unix timestamp (in seconds) for when the message was created.
-
#
-
# @return [Integer]
-
1
required :created_at, Integer
-
-
# @!attribute incomplete_at
-
# The Unix timestamp (in seconds) for when the message was marked as incomplete.
-
#
-
# @return [Integer, nil]
-
1
required :incomplete_at, Integer, nil?: true
-
-
# @!attribute incomplete_details
-
# On an incomplete message, details about why the message is incomplete.
-
#
-
# @return [OpenAI::Models::Beta::Threads::Message::IncompleteDetails, nil]
-
1
required :incomplete_details, -> { OpenAI::Beta::Threads::Message::IncompleteDetails }, nil?: true
-
-
# @!attribute metadata
-
# Set of 16 key-value pairs that can be attached to an object. This can be useful
-
# for storing additional information about the object in a structured format, and
-
# querying for objects via API or the dashboard.
-
#
-
# Keys are strings with a maximum length of 64 characters. Values are strings with
-
# a maximum length of 512 characters.
-
#
-
# @return [Hash{Symbol=>String}, nil]
-
1
required :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
-
-
# @!attribute object
-
# The object type, which is always `thread.message`.
-
#
-
# @return [Symbol, :"thread.message"]
-
1
required :object, const: :"thread.message"
-
-
# @!attribute role
-
# The entity that produced the message. One of `user` or `assistant`.
-
#
-
# @return [Symbol, OpenAI::Models::Beta::Threads::Message::Role]
-
1
required :role, enum: -> { OpenAI::Beta::Threads::Message::Role }
-
-
# @!attribute run_id
-
# The ID of the [run](https://platform.openai.com/docs/api-reference/runs)
-
# associated with the creation of this message. Value is `null` when messages are
-
# created manually using the create message or create thread endpoints.
-
#
-
# @return [String, nil]
-
1
required :run_id, String, nil?: true
-
-
# @!attribute status
-
# The status of the message, which can be either `in_progress`, `incomplete`, or
-
# `completed`.
-
#
-
# @return [Symbol, OpenAI::Models::Beta::Threads::Message::Status]
-
1
required :status, enum: -> { OpenAI::Beta::Threads::Message::Status }
-
-
# @!attribute thread_id
-
# The [thread](https://platform.openai.com/docs/api-reference/threads) ID that
-
# this message belongs to.
-
#
-
# @return [String]
-
1
required :thread_id, String
-
-
# @!method initialize(id:, assistant_id:, attachments:, completed_at:, content:, created_at:, incomplete_at:, incomplete_details:, metadata:, role:, run_id:, status:, thread_id:, object: :"thread.message")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::Threads::Message} for more details.
-
#
-
# Represents a message within a
-
# [thread](https://platform.openai.com/docs/api-reference/threads).
-
#
-
# @param id [String] The identifier, which can be referenced in API endpoints.
-
#
-
# @param assistant_id [String, nil] If applicable, the ID of the [assistant](https://platform.openai.com/docs/api-re
-
#
-
# @param attachments [Array<OpenAI::Models::Beta::Threads::Message::Attachment>, nil] A list of files attached to the message, and the tools they were added to.
-
#
-
# @param completed_at [Integer, nil] The Unix timestamp (in seconds) for when the message was completed.
-
#
-
# @param content [Array<OpenAI::Models::Beta::Threads::ImageFileContentBlock, OpenAI::Models::Beta::Threads::ImageURLContentBlock, OpenAI::Models::Beta::Threads::TextContentBlock, OpenAI::Models::Beta::Threads::RefusalContentBlock>] The content of the message in array of text and/or images.
-
#
-
# @param created_at [Integer] The Unix timestamp (in seconds) for when the message was created.
-
#
-
# @param incomplete_at [Integer, nil] The Unix timestamp (in seconds) for when the message was marked as incomplete.
-
#
-
# @param incomplete_details [OpenAI::Models::Beta::Threads::Message::IncompleteDetails, nil] On an incomplete message, details about why the message is incomplete.
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
#
-
# @param role [Symbol, OpenAI::Models::Beta::Threads::Message::Role] The entity that produced the message. One of `user` or `assistant`.
-
#
-
# @param run_id [String, nil] The ID of the [run](https://platform.openai.com/docs/api-reference/runs) associa
-
#
-
# @param status [Symbol, OpenAI::Models::Beta::Threads::Message::Status] The status of the message, which can be either `in_progress`, `incomplete`, or `
-
#
-
# @param thread_id [String] The [thread](https://platform.openai.com/docs/api-reference/threads) ID that thi
-
#
-
# @param object [Symbol, :"thread.message"] The object type, which is always `thread.message`.
-
-
1
class Attachment < OpenAI::Internal::Type::BaseModel
-
# @!attribute file_id
-
# The ID of the file to attach to the message.
-
#
-
# @return [String, nil]
-
1
optional :file_id, String
-
-
# @!attribute tools
-
# The tools to add this file to.
-
#
-
# @return [Array<OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::Threads::Message::Attachment::Tool::AssistantToolsFileSearchTypeOnly>, nil]
-
1
optional :tools,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::Threads::Message::Attachment::Tool]
-
}
-
-
# @!method initialize(file_id: nil, tools: nil)
-
# @param file_id [String] The ID of the file to attach to the message.
-
#
-
# @param tools [Array<OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::Threads::Message::Attachment::Tool::AssistantToolsFileSearchTypeOnly>] The tools to add this file to.
-
-
1
module Tool
-
1
extend OpenAI::Internal::Type::Union
-
-
1
variant -> { OpenAI::Beta::CodeInterpreterTool }
-
-
1
variant -> {
-
OpenAI::Beta::Threads::Message::Attachment::Tool::AssistantToolsFileSearchTypeOnly
-
}
-
-
1
class AssistantToolsFileSearchTypeOnly < OpenAI::Internal::Type::BaseModel
-
# @!attribute type
-
# The type of tool being defined: `file_search`
-
#
-
# @return [Symbol, :file_search]
-
1
required :type, const: :file_search
-
-
# @!method initialize(type: :file_search)
-
# @param type [Symbol, :file_search] The type of tool being defined: `file_search`
-
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::Threads::Message::Attachment::Tool::AssistantToolsFileSearchTypeOnly)]
-
end
-
end
-
-
# @see OpenAI::Models::Beta::Threads::Message#incomplete_details
-
1
class IncompleteDetails < OpenAI::Internal::Type::BaseModel
-
# @!attribute reason
-
# The reason the message is incomplete.
-
#
-
# @return [Symbol, OpenAI::Models::Beta::Threads::Message::IncompleteDetails::Reason]
-
1
required :reason, enum: -> { OpenAI::Beta::Threads::Message::IncompleteDetails::Reason }
-
-
# @!method initialize(reason:)
-
# On an incomplete message, details about why the message is incomplete.
-
#
-
# @param reason [Symbol, OpenAI::Models::Beta::Threads::Message::IncompleteDetails::Reason] The reason the message is incomplete.
-
-
# The reason the message is incomplete.
-
#
-
# @see OpenAI::Models::Beta::Threads::Message::IncompleteDetails#reason
-
1
module Reason
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
CONTENT_FILTER = :content_filter
-
1
MAX_TOKENS = :max_tokens
-
1
RUN_CANCELLED = :run_cancelled
-
1
RUN_EXPIRED = :run_expired
-
1
RUN_FAILED = :run_failed
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
-
# The entity that produced the message. One of `user` or `assistant`.
-
#
-
# @see OpenAI::Models::Beta::Threads::Message#role
-
1
module Role
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
USER = :user
-
1
ASSISTANT = :assistant
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
# The status of the message, which can be either `in_progress`, `incomplete`, or
-
# `completed`.
-
#
-
# @see OpenAI::Models::Beta::Threads::Message#status
-
1
module Status
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
IN_PROGRESS = :in_progress
-
1
INCOMPLETE = :incomplete
-
1
COMPLETED = :completed
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
# References an image [File](https://platform.openai.com/docs/api-reference/files)
-
# in the content of a message.
-
1
module MessageContent
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
# References an image [File](https://platform.openai.com/docs/api-reference/files) in the content of a message.
-
1
variant :image_file, -> { OpenAI::Beta::Threads::ImageFileContentBlock }
-
-
# References an image URL in the content of a message.
-
1
variant :image_url, -> { OpenAI::Beta::Threads::ImageURLContentBlock }
-
-
# The text content that is part of a message.
-
1
variant :text, -> { OpenAI::Beta::Threads::TextContentBlock }
-
-
# The refusal content generated by the assistant.
-
1
variant :refusal, -> { OpenAI::Beta::Threads::RefusalContentBlock }
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Beta::Threads::ImageFileContentBlock, OpenAI::Models::Beta::Threads::ImageURLContentBlock, OpenAI::Models::Beta::Threads::TextContentBlock, OpenAI::Models::Beta::Threads::RefusalContentBlock)]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
# References an image [File](https://platform.openai.com/docs/api-reference/files)
-
# in the content of a message.
-
1
module MessageContentDelta
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
# References an image [File](https://platform.openai.com/docs/api-reference/files) in the content of a message.
-
1
variant :image_file, -> { OpenAI::Beta::Threads::ImageFileDeltaBlock }
-
-
# The text content that is part of a message.
-
1
variant :text, -> { OpenAI::Beta::Threads::TextDeltaBlock }
-
-
# The refusal content that is part of a message.
-
1
variant :refusal, -> { OpenAI::Beta::Threads::RefusalDeltaBlock }
-
-
# References an image URL in the content of a message.
-
1
variant :image_url, -> { OpenAI::Beta::Threads::ImageURLDeltaBlock }
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Beta::Threads::ImageFileDeltaBlock, OpenAI::Models::Beta::Threads::TextDeltaBlock, OpenAI::Models::Beta::Threads::RefusalDeltaBlock, OpenAI::Models::Beta::Threads::ImageURLDeltaBlock)]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
# References an image [File](https://platform.openai.com/docs/api-reference/files)
-
# in the content of a message.
-
1
module MessageContentPartParam
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
# References an image [File](https://platform.openai.com/docs/api-reference/files) in the content of a message.
-
1
variant :image_file, -> { OpenAI::Beta::Threads::ImageFileContentBlock }
-
-
# References an image URL in the content of a message.
-
1
variant :image_url, -> { OpenAI::Beta::Threads::ImageURLContentBlock }
-
-
# The text content that is part of a message.
-
1
variant :text, -> { OpenAI::Beta::Threads::TextContentBlockParam }
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Beta::Threads::ImageFileContentBlock, OpenAI::Models::Beta::Threads::ImageURLContentBlock, OpenAI::Models::Beta::Threads::TextContentBlockParam)]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
# @see OpenAI::Resources::Beta::Threads::Messages#create
-
1
class MessageCreateParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!attribute content
-
# The text contents of the message.
-
#
-
# @return [String, Array<OpenAI::Models::Beta::Threads::ImageFileContentBlock, OpenAI::Models::Beta::Threads::ImageURLContentBlock, OpenAI::Models::Beta::Threads::TextContentBlockParam>]
-
1
required :content, union: -> { OpenAI::Beta::Threads::MessageCreateParams::Content }
-
-
# @!attribute role
-
# The role of the entity that is creating the message. Allowed values include:
-
#
-
# - `user`: Indicates the message is sent by an actual user and should be used in
-
# most cases to represent user-generated messages.
-
# - `assistant`: Indicates the message is generated by the assistant. Use this
-
# value to insert messages from the assistant into the conversation.
-
#
-
# @return [Symbol, OpenAI::Models::Beta::Threads::MessageCreateParams::Role]
-
1
required :role, enum: -> { OpenAI::Beta::Threads::MessageCreateParams::Role }
-
-
# @!attribute attachments
-
# A list of files attached to the message, and the tools they should be added to.
-
#
-
# @return [Array<OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment>, nil]
-
1
optional :attachments,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[OpenAI::Beta::Threads::MessageCreateParams::Attachment]
-
},
-
nil?: true
-
-
# @!attribute metadata
-
# Set of 16 key-value pairs that can be attached to an object. This can be useful
-
# for storing additional information about the object in a structured format, and
-
# querying for objects via API or the dashboard.
-
#
-
# Keys are strings with a maximum length of 64 characters. Values are strings with
-
# a maximum length of 512 characters.
-
#
-
# @return [Hash{Symbol=>String}, nil]
-
1
optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
-
-
# @!method initialize(content:, role:, attachments: nil, metadata: nil, request_options: {})
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::Threads::MessageCreateParams} for more details.
-
#
-
# @param content [String, Array<OpenAI::Models::Beta::Threads::ImageFileContentBlock, OpenAI::Models::Beta::Threads::ImageURLContentBlock, OpenAI::Models::Beta::Threads::TextContentBlockParam>] The text contents of the message.
-
#
-
# @param role [Symbol, OpenAI::Models::Beta::Threads::MessageCreateParams::Role] The role of the entity that is creating the message. Allowed values include:
-
#
-
# @param attachments [Array<OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment>, nil] A list of files attached to the message, and the tools they should be added to.
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
-
# The text contents of the message.
-
1
module Content
-
1
extend OpenAI::Internal::Type::Union
-
-
# The text contents of the message.
-
1
variant String
-
-
# An array of content parts with a defined type, each can be of type `text` or images can be passed with `image_url` or `image_file`. Image types are only supported on [Vision-compatible models](https://platform.openai.com/docs/models).
-
1
variant -> { OpenAI::Models::Beta::Threads::MessageCreateParams::Content::MessageContentPartParamArray }
-
-
# @!method self.variants
-
# @return [Array(String, Array<OpenAI::Models::Beta::Threads::ImageFileContentBlock, OpenAI::Models::Beta::Threads::ImageURLContentBlock, OpenAI::Models::Beta::Threads::TextContentBlockParam>)]
-
-
# @type [OpenAI::Internal::Type::Converter]
-
MessageContentPartParamArray =
-
1
OpenAI::Internal::Type::ArrayOf[union: -> { OpenAI::Beta::Threads::MessageContentPartParam }]
-
end
-
-
# The role of the entity that is creating the message. Allowed values include:
-
#
-
# - `user`: Indicates the message is sent by an actual user and should be used in
-
# most cases to represent user-generated messages.
-
# - `assistant`: Indicates the message is generated by the assistant. Use this
-
# value to insert messages from the assistant into the conversation.
-
1
module Role
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
USER = :user
-
1
ASSISTANT = :assistant
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
1
class Attachment < OpenAI::Internal::Type::BaseModel
-
# @!attribute file_id
-
# The ID of the file to attach to the message.
-
#
-
# @return [String, nil]
-
1
optional :file_id, String
-
-
# @!attribute tools
-
# The tools to add this file to.
-
#
-
# @return [Array<OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment::Tool::FileSearch>, nil]
-
1
optional :tools,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::Threads::MessageCreateParams::Attachment::Tool]
-
}
-
-
# @!method initialize(file_id: nil, tools: nil)
-
# @param file_id [String] The ID of the file to attach to the message.
-
#
-
# @param tools [Array<OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment::Tool::FileSearch>] The tools to add this file to.
-
-
1
module Tool
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
1
variant :code_interpreter, -> { OpenAI::Beta::CodeInterpreterTool }
-
-
1
variant :file_search,
-
-> {
-
OpenAI::Beta::Threads::MessageCreateParams::Attachment::Tool::FileSearch
-
}
-
-
1
class FileSearch < OpenAI::Internal::Type::BaseModel
-
# @!attribute type
-
# The type of tool being defined: `file_search`
-
#
-
# @return [Symbol, :file_search]
-
1
required :type, const: :file_search
-
-
# @!method initialize(type: :file_search)
-
# @param type [Symbol, :file_search] The type of tool being defined: `file_search`
-
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment::Tool::FileSearch)]
-
end
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
# @see OpenAI::Resources::Beta::Threads::Messages#delete
-
1
class MessageDeleteParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!attribute thread_id
-
#
-
# @return [String]
-
1
required :thread_id, String
-
-
# @!method initialize(thread_id:, request_options: {})
-
# @param thread_id [String]
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
# @see OpenAI::Resources::Beta::Threads::Messages#delete
-
1
class MessageDeleted < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute deleted
-
#
-
# @return [Boolean]
-
1
required :deleted, OpenAI::Internal::Type::Boolean
-
-
# @!attribute object
-
#
-
# @return [Symbol, :"thread.message.deleted"]
-
1
required :object, const: :"thread.message.deleted"
-
-
# @!method initialize(id:, deleted:, object: :"thread.message.deleted")
-
# @param id [String]
-
# @param deleted [Boolean]
-
# @param object [Symbol, :"thread.message.deleted"]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
1
class MessageDelta < OpenAI::Internal::Type::BaseModel
-
# @!attribute content
-
# The content of the message in array of text and/or images.
-
#
-
# @return [Array<OpenAI::Models::Beta::Threads::ImageFileDeltaBlock, OpenAI::Models::Beta::Threads::TextDeltaBlock, OpenAI::Models::Beta::Threads::RefusalDeltaBlock, OpenAI::Models::Beta::Threads::ImageURLDeltaBlock>, nil]
-
1
optional :content,
-
-> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::Threads::MessageContentDelta] }
-
-
# @!attribute role
-
# The entity that produced the message. One of `user` or `assistant`.
-
#
-
# @return [Symbol, OpenAI::Models::Beta::Threads::MessageDelta::Role, nil]
-
1
optional :role, enum: -> { OpenAI::Beta::Threads::MessageDelta::Role }
-
-
# @!method initialize(content: nil, role: nil)
-
# The delta containing the fields that have changed on the Message.
-
#
-
# @param content [Array<OpenAI::Models::Beta::Threads::ImageFileDeltaBlock, OpenAI::Models::Beta::Threads::TextDeltaBlock, OpenAI::Models::Beta::Threads::RefusalDeltaBlock, OpenAI::Models::Beta::Threads::ImageURLDeltaBlock>] The content of the message in array of text and/or images.
-
#
-
# @param role [Symbol, OpenAI::Models::Beta::Threads::MessageDelta::Role] The entity that produced the message. One of `user` or `assistant`.
-
-
# The entity that produced the message. One of `user` or `assistant`.
-
#
-
# @see OpenAI::Models::Beta::Threads::MessageDelta#role
-
1
module Role
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
USER = :user
-
1
ASSISTANT = :assistant
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
1
class MessageDeltaEvent < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The identifier of the message, which can be referenced in API endpoints.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute delta
-
# The delta containing the fields that have changed on the Message.
-
#
-
# @return [OpenAI::Models::Beta::Threads::MessageDelta]
-
1
required :delta, -> { OpenAI::Beta::Threads::MessageDelta }
-
-
# @!attribute object
-
# The object type, which is always `thread.message.delta`.
-
#
-
# @return [Symbol, :"thread.message.delta"]
-
1
required :object, const: :"thread.message.delta"
-
-
# @!method initialize(id:, delta:, object: :"thread.message.delta")
-
# Represents a message delta i.e. any changed fields on a message during
-
# streaming.
-
#
-
# @param id [String] The identifier of the message, which can be referenced in API endpoints.
-
#
-
# @param delta [OpenAI::Models::Beta::Threads::MessageDelta] The delta containing the fields that have changed on the Message.
-
#
-
# @param object [Symbol, :"thread.message.delta"] The object type, which is always `thread.message.delta`.
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
# @see OpenAI::Resources::Beta::Threads::Messages#list
-
1
class MessageListParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!attribute after
-
# A cursor for use in pagination. `after` is an object ID that defines your place
-
# in the list. For instance, if you make a list request and receive 100 objects,
-
# ending with obj_foo, your subsequent call can include after=obj_foo in order to
-
# fetch the next page of the list.
-
#
-
# @return [String, nil]
-
1
optional :after, String
-
-
# @!attribute before
-
# A cursor for use in pagination. `before` is an object ID that defines your place
-
# in the list. For instance, if you make a list request and receive 100 objects,
-
# starting with obj_foo, your subsequent call can include before=obj_foo in order
-
# to fetch the previous page of the list.
-
#
-
# @return [String, nil]
-
1
optional :before, String
-
-
# @!attribute limit
-
# A limit on the number of objects to be returned. Limit can range between 1 and
-
# 100, and the default is 20.
-
#
-
# @return [Integer, nil]
-
1
optional :limit, Integer
-
-
# @!attribute order
-
# Sort order by the `created_at` timestamp of the objects. `asc` for ascending
-
# order and `desc` for descending order.
-
#
-
# @return [Symbol, OpenAI::Models::Beta::Threads::MessageListParams::Order, nil]
-
1
optional :order, enum: -> { OpenAI::Beta::Threads::MessageListParams::Order }
-
-
# @!attribute run_id
-
# Filter messages by the run ID that generated them.
-
#
-
# @return [String, nil]
-
1
optional :run_id, String
-
-
# @!method initialize(after: nil, before: nil, limit: nil, order: nil, run_id: nil, request_options: {})
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::Threads::MessageListParams} for more details.
-
#
-
# @param after [String] A cursor for use in pagination. `after` is an object ID that defines your place
-
#
-
# @param before [String] A cursor for use in pagination. `before` is an object ID that defines your place
-
#
-
# @param limit [Integer] A limit on the number of objects to be returned. Limit can range between 1 and 1
-
#
-
# @param order [Symbol, OpenAI::Models::Beta::Threads::MessageListParams::Order] Sort order by the `created_at` timestamp of the objects. `asc` for ascending ord
-
#
-
# @param run_id [String] Filter messages by the run ID that generated them.
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
-
# Sort order by the `created_at` timestamp of the objects. `asc` for ascending
-
# order and `desc` for descending order.
-
1
module Order
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
ASC = :asc
-
1
DESC = :desc
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
# @see OpenAI::Resources::Beta::Threads::Messages#retrieve
-
1
class MessageRetrieveParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!attribute thread_id
-
#
-
# @return [String]
-
1
required :thread_id, String
-
-
# @!method initialize(thread_id:, request_options: {})
-
# @param thread_id [String]
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
# @see OpenAI::Resources::Beta::Threads::Messages#update
-
1
class MessageUpdateParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!attribute thread_id
-
#
-
# @return [String]
-
1
required :thread_id, String
-
-
# @!attribute metadata
-
# Set of 16 key-value pairs that can be attached to an object. This can be useful
-
# for storing additional information about the object in a structured format, and
-
# querying for objects via API or the dashboard.
-
#
-
# Keys are strings with a maximum length of 64 characters. Values are strings with
-
# a maximum length of 512 characters.
-
#
-
# @return [Hash{Symbol=>String}, nil]
-
1
optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
-
-
# @!method initialize(thread_id:, metadata: nil, request_options: {})
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::Threads::MessageUpdateParams} for more details.
-
#
-
# @param thread_id [String]
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
1
class RefusalContentBlock < OpenAI::Internal::Type::BaseModel
-
# @!attribute refusal
-
#
-
# @return [String]
-
1
required :refusal, String
-
-
# @!attribute type
-
# Always `refusal`.
-
#
-
# @return [Symbol, :refusal]
-
1
required :type, const: :refusal
-
-
# @!method initialize(refusal:, type: :refusal)
-
# The refusal content generated by the assistant.
-
#
-
# @param refusal [String]
-
#
-
# @param type [Symbol, :refusal] Always `refusal`.
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
1
class RefusalDeltaBlock < OpenAI::Internal::Type::BaseModel
-
# @!attribute index
-
# The index of the refusal part in the message.
-
#
-
# @return [Integer]
-
1
required :index, Integer
-
-
# @!attribute type
-
# Always `refusal`.
-
#
-
# @return [Symbol, :refusal]
-
1
required :type, const: :refusal
-
-
# @!attribute refusal
-
#
-
# @return [String, nil]
-
1
optional :refusal, String
-
-
# @!method initialize(index:, refusal: nil, type: :refusal)
-
# The refusal content that is part of a message.
-
#
-
# @param index [Integer] The index of the refusal part in the message.
-
#
-
# @param refusal [String]
-
#
-
# @param type [Symbol, :refusal] Always `refusal`.
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
1
class RequiredActionFunctionToolCall < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The ID of the tool call. This ID must be referenced when you submit the tool
-
# outputs in using the
-
# [Submit tool outputs to run](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs)
-
# endpoint.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute function
-
# The function definition.
-
#
-
# @return [OpenAI::Models::Beta::Threads::RequiredActionFunctionToolCall::Function]
-
1
required :function, -> { OpenAI::Beta::Threads::RequiredActionFunctionToolCall::Function }
-
-
# @!attribute type
-
# The type of tool call the output is required for. For now, this is always
-
# `function`.
-
#
-
# @return [Symbol, :function]
-
1
required :type, const: :function
-
-
# @!method initialize(id:, function:, type: :function)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::Threads::RequiredActionFunctionToolCall} for more
-
# details.
-
#
-
# Tool call objects
-
#
-
# @param id [String] The ID of the tool call. This ID must be referenced when you submit the tool out
-
#
-
# @param function [OpenAI::Models::Beta::Threads::RequiredActionFunctionToolCall::Function] The function definition.
-
#
-
# @param type [Symbol, :function] The type of tool call the output is required for. For now, this is always `funct
-
-
# @see OpenAI::Models::Beta::Threads::RequiredActionFunctionToolCall#function
-
1
class Function < OpenAI::Internal::Type::BaseModel
-
# @!attribute arguments
-
# The arguments that the model expects you to pass to the function.
-
#
-
# @return [String]
-
1
required :arguments, String
-
-
# @!attribute name
-
# The name of the function.
-
#
-
# @return [String]
-
1
required :name, String
-
-
# @!method initialize(arguments:, name:)
-
# The function definition.
-
#
-
# @param arguments [String] The arguments that the model expects you to pass to the function.
-
#
-
# @param name [String] The name of the function.
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
# @see OpenAI::Resources::Beta::Threads::Runs#create
-
#
-
# @see OpenAI::Resources::Beta::Threads::Runs#create_stream_raw
-
1
class Run < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The identifier, which can be referenced in API endpoints.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute assistant_id
-
# The ID of the
-
# [assistant](https://platform.openai.com/docs/api-reference/assistants) used for
-
# execution of this run.
-
#
-
# @return [String]
-
1
required :assistant_id, String
-
-
# @!attribute cancelled_at
-
# The Unix timestamp (in seconds) for when the run was cancelled.
-
#
-
# @return [Integer, nil]
-
1
required :cancelled_at, Integer, nil?: true
-
-
# @!attribute completed_at
-
# The Unix timestamp (in seconds) for when the run was completed.
-
#
-
# @return [Integer, nil]
-
1
required :completed_at, Integer, nil?: true
-
-
# @!attribute created_at
-
# The Unix timestamp (in seconds) for when the run was created.
-
#
-
# @return [Integer]
-
1
required :created_at, Integer
-
-
# @!attribute expires_at
-
# The Unix timestamp (in seconds) for when the run will expire.
-
#
-
# @return [Integer, nil]
-
1
required :expires_at, Integer, nil?: true
-
-
# @!attribute failed_at
-
# The Unix timestamp (in seconds) for when the run failed.
-
#
-
# @return [Integer, nil]
-
1
required :failed_at, Integer, nil?: true
-
-
# @!attribute incomplete_details
-
# Details on why the run is incomplete. Will be `null` if the run is not
-
# incomplete.
-
#
-
# @return [OpenAI::Models::Beta::Threads::Run::IncompleteDetails, nil]
-
1
required :incomplete_details, -> { OpenAI::Beta::Threads::Run::IncompleteDetails }, nil?: true
-
-
# @!attribute instructions
-
# The instructions that the
-
# [assistant](https://platform.openai.com/docs/api-reference/assistants) used for
-
# this run.
-
#
-
# @return [String]
-
1
required :instructions, String
-
-
# @!attribute last_error
-
# The last error associated with this run. Will be `null` if there are no errors.
-
#
-
# @return [OpenAI::Models::Beta::Threads::Run::LastError, nil]
-
1
required :last_error, -> { OpenAI::Beta::Threads::Run::LastError }, nil?: true
-
-
# @!attribute max_completion_tokens
-
# The maximum number of completion tokens specified to have been used over the
-
# course of the run.
-
#
-
# @return [Integer, nil]
-
1
required :max_completion_tokens, Integer, nil?: true
-
-
# @!attribute max_prompt_tokens
-
# The maximum number of prompt tokens specified to have been used over the course
-
# of the run.
-
#
-
# @return [Integer, nil]
-
1
required :max_prompt_tokens, Integer, nil?: true
-
-
# @!attribute metadata
-
# Set of 16 key-value pairs that can be attached to an object. This can be useful
-
# for storing additional information about the object in a structured format, and
-
# querying for objects via API or the dashboard.
-
#
-
# Keys are strings with a maximum length of 64 characters. Values are strings with
-
# a maximum length of 512 characters.
-
#
-
# @return [Hash{Symbol=>String}, nil]
-
1
required :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
-
-
# @!attribute model
-
# The model that the
-
# [assistant](https://platform.openai.com/docs/api-reference/assistants) used for
-
# this run.
-
#
-
# @return [String]
-
1
required :model, String
-
-
# @!attribute object
-
# The object type, which is always `thread.run`.
-
#
-
# @return [Symbol, :"thread.run"]
-
1
required :object, const: :"thread.run"
-
-
# @!attribute parallel_tool_calls
-
# Whether to enable
-
# [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
-
# during tool use.
-
#
-
# @return [Boolean]
-
1
required :parallel_tool_calls, OpenAI::Internal::Type::Boolean
-
-
# @!attribute required_action
-
# Details on the action required to continue the run. Will be `null` if no action
-
# is required.
-
#
-
# @return [OpenAI::Models::Beta::Threads::Run::RequiredAction, nil]
-
1
required :required_action, -> { OpenAI::Beta::Threads::Run::RequiredAction }, nil?: true
-
-
# @!attribute response_format
-
# Specifies the format that the model must output. Compatible with
-
# [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
-
# [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
-
# and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
-
#
-
# Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
-
# Outputs which ensures the model will match your supplied JSON schema. Learn more
-
# in the
-
# [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
-
#
-
# Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
-
# message the model generates is valid JSON.
-
#
-
# **Important:** when using JSON mode, you **must** also instruct the model to
-
# produce JSON yourself via a system or user message. Without this, the model may
-
# generate an unending stream of whitespace until the generation reaches the token
-
# limit, resulting in a long-running and seemingly "stuck" request. Also note that
-
# the message content may be partially cut off if `finish_reason="length"`, which
-
# indicates the generation exceeded `max_tokens` or the conversation exceeded the
-
# max context length.
-
#
-
# @return [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil]
-
1
required :response_format, union: -> { OpenAI::Beta::AssistantResponseFormatOption }, nil?: true
-
-
# @!attribute started_at
-
# The Unix timestamp (in seconds) for when the run was started.
-
#
-
# @return [Integer, nil]
-
1
required :started_at, Integer, nil?: true
-
-
# @!attribute status
-
# The status of the run, which can be either `queued`, `in_progress`,
-
# `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`,
-
# `incomplete`, or `expired`.
-
#
-
# @return [Symbol, OpenAI::Models::Beta::Threads::RunStatus]
-
1
required :status, enum: -> { OpenAI::Beta::Threads::RunStatus }
-
-
# @!attribute thread_id
-
# The ID of the [thread](https://platform.openai.com/docs/api-reference/threads)
-
# that was executed on as a part of this run.
-
#
-
# @return [String]
-
1
required :thread_id, String
-
-
# @!attribute tool_choice
-
# Controls which (if any) tool is called by the model. `none` means the model will
-
# not call any tools and instead generates a message. `auto` is the default value
-
# and means the model can pick between generating a message or calling one or more
-
# tools. `required` means the model must call one or more tools before responding
-
# to the user. Specifying a particular tool like `{"type": "file_search"}` or
-
# `{"type": "function", "function": {"name": "my_function"}}` forces the model to
-
# call that tool.
-
#
-
# @return [Symbol, OpenAI::Models::Beta::AssistantToolChoiceOption::Auto, OpenAI::Models::Beta::AssistantToolChoice, nil]
-
1
required :tool_choice, union: -> { OpenAI::Beta::AssistantToolChoiceOption }, nil?: true
-
-
# @!attribute tools
-
# The list of tools that the
-
# [assistant](https://platform.openai.com/docs/api-reference/assistants) used for
-
# this run.
-
#
-
# @return [Array<OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::FileSearchTool, OpenAI::Models::Beta::FunctionTool>]
-
1
required :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::AssistantTool] }
-
-
# @!attribute truncation_strategy
-
# Controls for how a thread will be truncated prior to the run. Use this to
-
# control the intial context window of the run.
-
#
-
# @return [OpenAI::Models::Beta::Threads::Run::TruncationStrategy, nil]
-
1
required :truncation_strategy, -> { OpenAI::Beta::Threads::Run::TruncationStrategy }, nil?: true
-
-
# @!attribute usage
-
# Usage statistics related to the run. This value will be `null` if the run is not
-
# in a terminal state (i.e. `in_progress`, `queued`, etc.).
-
#
-
# @return [OpenAI::Models::Beta::Threads::Run::Usage, nil]
-
1
required :usage, -> { OpenAI::Beta::Threads::Run::Usage }, nil?: true
-
-
# @!attribute temperature
-
# The sampling temperature used for this run. If not set, defaults to 1.
-
#
-
# @return [Float, nil]
-
1
optional :temperature, Float, nil?: true
-
-
# @!attribute top_p
-
# The nucleus sampling value used for this run. If not set, defaults to 1.
-
#
-
# @return [Float, nil]
-
1
optional :top_p, Float, nil?: true
-
-
# @!method initialize(id:, assistant_id:, cancelled_at:, completed_at:, created_at:, expires_at:, failed_at:, incomplete_details:, instructions:, last_error:, max_completion_tokens:, max_prompt_tokens:, metadata:, model:, parallel_tool_calls:, required_action:, response_format:, started_at:, status:, thread_id:, tool_choice:, tools:, truncation_strategy:, usage:, temperature: nil, top_p: nil, object: :"thread.run")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::Threads::Run} for more details.
-
#
-
# Represents an execution run on a
-
# [thread](https://platform.openai.com/docs/api-reference/threads).
-
#
-
# @param id [String] The identifier, which can be referenced in API endpoints.
-
#
-
# @param assistant_id [String] The ID of the [assistant](https://platform.openai.com/docs/api-reference/assista
-
#
-
# @param cancelled_at [Integer, nil] The Unix timestamp (in seconds) for when the run was cancelled.
-
#
-
# @param completed_at [Integer, nil] The Unix timestamp (in seconds) for when the run was completed.
-
#
-
# @param created_at [Integer] The Unix timestamp (in seconds) for when the run was created.
-
#
-
# @param expires_at [Integer, nil] The Unix timestamp (in seconds) for when the run will expire.
-
#
-
# @param failed_at [Integer, nil] The Unix timestamp (in seconds) for when the run failed.
-
#
-
# @param incomplete_details [OpenAI::Models::Beta::Threads::Run::IncompleteDetails, nil] Details on why the run is incomplete. Will be `null` if the run is not incomplet
-
#
-
# @param instructions [String] The instructions that the [assistant](https://platform.openai.com/docs/api-refer
-
#
-
# @param last_error [OpenAI::Models::Beta::Threads::Run::LastError, nil] The last error associated with this run. Will be `null` if there are no errors.
-
#
-
# @param max_completion_tokens [Integer, nil] The maximum number of completion tokens specified to have been used over the cou
-
#
-
# @param max_prompt_tokens [Integer, nil] The maximum number of prompt tokens specified to have been used over the course
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
#
-
# @param model [String] The model that the [assistant](https://platform.openai.com/docs/api-reference/as
-
#
-
# @param parallel_tool_calls [Boolean] Whether to enable [parallel function calling](https://platform.openai.com/docs/g
-
#
-
# @param required_action [OpenAI::Models::Beta::Threads::Run::RequiredAction, nil] Details on the action required to continue the run. Will be `null` if no action
-
#
-
# @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] Specifies the format that the model must output. Compatible with [GPT-4o](https:
-
#
-
# @param started_at [Integer, nil] The Unix timestamp (in seconds) for when the run was started.
-
#
-
# @param status [Symbol, OpenAI::Models::Beta::Threads::RunStatus] The status of the run, which can be either `queued`, `in_progress`, `requires_ac
-
#
-
# @param thread_id [String] The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) t
-
#
-
# @param tool_choice [Symbol, OpenAI::Models::Beta::AssistantToolChoiceOption::Auto, OpenAI::Models::Beta::AssistantToolChoice, nil] Controls which (if any) tool is called by the model.
-
#
-
# @param tools [Array<OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::FileSearchTool, OpenAI::Models::Beta::FunctionTool>] The list of tools that the [assistant](https://platform.openai.com/docs/api-refe
-
#
-
# @param truncation_strategy [OpenAI::Models::Beta::Threads::Run::TruncationStrategy, nil] Controls for how a thread will be truncated prior to the run. Use this to contro
-
#
-
# @param usage [OpenAI::Models::Beta::Threads::Run::Usage, nil] Usage statistics related to the run. This value will be `null` if the run is not
-
#
-
# @param temperature [Float, nil] The sampling temperature used for this run. If not set, defaults to 1.
-
#
-
# @param top_p [Float, nil] The nucleus sampling value used for this run. If not set, defaults to 1.
-
#
-
# @param object [Symbol, :"thread.run"] The object type, which is always `thread.run`.
-
-
# @see OpenAI::Models::Beta::Threads::Run#incomplete_details
-
1
class IncompleteDetails < OpenAI::Internal::Type::BaseModel
-
# @!attribute reason
-
# The reason why the run is incomplete. This will point to which specific token
-
# limit was reached over the course of the run.
-
#
-
# @return [Symbol, OpenAI::Models::Beta::Threads::Run::IncompleteDetails::Reason, nil]
-
1
optional :reason, enum: -> { OpenAI::Beta::Threads::Run::IncompleteDetails::Reason }
-
-
# @!method initialize(reason: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::Threads::Run::IncompleteDetails} for more details.
-
#
-
# Details on why the run is incomplete. Will be `null` if the run is not
-
# incomplete.
-
#
-
# @param reason [Symbol, OpenAI::Models::Beta::Threads::Run::IncompleteDetails::Reason] The reason why the run is incomplete. This will point to which specific token li
-
-
# The reason why the run is incomplete. This will point to which specific token
-
# limit was reached over the course of the run.
-
#
-
# @see OpenAI::Models::Beta::Threads::Run::IncompleteDetails#reason
-
1
module Reason
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
MAX_COMPLETION_TOKENS = :max_completion_tokens
-
1
MAX_PROMPT_TOKENS = :max_prompt_tokens
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
-
# @see OpenAI::Models::Beta::Threads::Run#last_error
-
1
class LastError < OpenAI::Internal::Type::BaseModel
-
# @!attribute code
-
# One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`.
-
#
-
# @return [Symbol, OpenAI::Models::Beta::Threads::Run::LastError::Code]
-
1
required :code, enum: -> { OpenAI::Beta::Threads::Run::LastError::Code }
-
-
# @!attribute message
-
# A human-readable description of the error.
-
#
-
# @return [String]
-
1
required :message, String
-
-
# @!method initialize(code:, message:)
-
# The last error associated with this run. Will be `null` if there are no errors.
-
#
-
# @param code [Symbol, OpenAI::Models::Beta::Threads::Run::LastError::Code] One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`.
-
#
-
# @param message [String] A human-readable description of the error.
-
-
# One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`.
-
#
-
# @see OpenAI::Models::Beta::Threads::Run::LastError#code
-
1
module Code
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
SERVER_ERROR = :server_error
-
1
RATE_LIMIT_EXCEEDED = :rate_limit_exceeded
-
1
INVALID_PROMPT = :invalid_prompt
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
-
# @see OpenAI::Models::Beta::Threads::Run#required_action
-
1
class RequiredAction < OpenAI::Internal::Type::BaseModel
-
# @!attribute submit_tool_outputs
-
# Details on the tool outputs needed for this run to continue.
-
#
-
# @return [OpenAI::Models::Beta::Threads::Run::RequiredAction::SubmitToolOutputs]
-
1
required :submit_tool_outputs,
-
-> {
-
OpenAI::Beta::Threads::Run::RequiredAction::SubmitToolOutputs
-
}
-
-
# @!attribute type
-
# For now, this is always `submit_tool_outputs`.
-
#
-
# @return [Symbol, :submit_tool_outputs]
-
1
required :type, const: :submit_tool_outputs
-
-
# @!method initialize(submit_tool_outputs:, type: :submit_tool_outputs)
-
# Details on the action required to continue the run. Will be `null` if no action
-
# is required.
-
#
-
# @param submit_tool_outputs [OpenAI::Models::Beta::Threads::Run::RequiredAction::SubmitToolOutputs] Details on the tool outputs needed for this run to continue.
-
#
-
# @param type [Symbol, :submit_tool_outputs] For now, this is always `submit_tool_outputs`.
-
-
# @see OpenAI::Models::Beta::Threads::Run::RequiredAction#submit_tool_outputs
-
1
class SubmitToolOutputs < OpenAI::Internal::Type::BaseModel
-
# @!attribute tool_calls
-
# A list of the relevant tool calls.
-
#
-
# @return [Array<OpenAI::Models::Beta::Threads::RequiredActionFunctionToolCall>]
-
1
required :tool_calls,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[OpenAI::Beta::Threads::RequiredActionFunctionToolCall]
-
}
-
-
# @!method initialize(tool_calls:)
-
# Details on the tool outputs needed for this run to continue.
-
#
-
# @param tool_calls [Array<OpenAI::Models::Beta::Threads::RequiredActionFunctionToolCall>] A list of the relevant tool calls.
-
end
-
end
-
-
# @see OpenAI::Models::Beta::Threads::Run#truncation_strategy
-
1
class TruncationStrategy < OpenAI::Internal::Type::BaseModel
-
# @!attribute type
-
# The truncation strategy to use for the thread. The default is `auto`. If set to
-
# `last_messages`, the thread will be truncated to the n most recent messages in
-
# the thread. When set to `auto`, messages in the middle of the thread will be
-
# dropped to fit the context length of the model, `max_prompt_tokens`.
-
#
-
# @return [Symbol, OpenAI::Models::Beta::Threads::Run::TruncationStrategy::Type]
-
1
required :type, enum: -> { OpenAI::Beta::Threads::Run::TruncationStrategy::Type }
-
-
# @!attribute last_messages
-
# The number of most recent messages from the thread when constructing the context
-
# for the run.
-
#
-
# @return [Integer, nil]
-
1
optional :last_messages, Integer, nil?: true
-
-
# @!method initialize(type:, last_messages: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::Threads::Run::TruncationStrategy} for more details.
-
#
-
# Controls for how a thread will be truncated prior to the run. Use this to
-
# control the intial context window of the run.
-
#
-
# @param type [Symbol, OpenAI::Models::Beta::Threads::Run::TruncationStrategy::Type] The truncation strategy to use for the thread. The default is `auto`. If set to
-
#
-
# @param last_messages [Integer, nil] The number of most recent messages from the thread when constructing the context
-
-
# The truncation strategy to use for the thread. The default is `auto`. If set to
-
# `last_messages`, the thread will be truncated to the n most recent messages in
-
# the thread. When set to `auto`, messages in the middle of the thread will be
-
# dropped to fit the context length of the model, `max_prompt_tokens`.
-
#
-
# @see OpenAI::Models::Beta::Threads::Run::TruncationStrategy#type
-
1
module Type
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
AUTO = :auto
-
1
LAST_MESSAGES = :last_messages
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
-
# @see OpenAI::Models::Beta::Threads::Run#usage
-
1
class Usage < OpenAI::Internal::Type::BaseModel
-
# @!attribute completion_tokens
-
# Number of completion tokens used over the course of the run.
-
#
-
# @return [Integer]
-
1
required :completion_tokens, Integer
-
-
# @!attribute prompt_tokens
-
# Number of prompt tokens used over the course of the run.
-
#
-
# @return [Integer]
-
1
required :prompt_tokens, Integer
-
-
# @!attribute total_tokens
-
# Total number of tokens used (prompt + completion).
-
#
-
# @return [Integer]
-
1
required :total_tokens, Integer
-
-
# @!method initialize(completion_tokens:, prompt_tokens:, total_tokens:)
-
# Usage statistics related to the run. This value will be `null` if the run is not
-
# in a terminal state (i.e. `in_progress`, `queued`, etc.).
-
#
-
# @param completion_tokens [Integer] Number of completion tokens used over the course of the run.
-
#
-
# @param prompt_tokens [Integer] Number of prompt tokens used over the course of the run.
-
#
-
# @param total_tokens [Integer] Total number of tokens used (prompt + completion).
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
# @see OpenAI::Resources::Beta::Threads::Runs#cancel
-
1
class RunCancelParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!attribute thread_id
-
#
-
# @return [String]
-
1
required :thread_id, String
-
-
# @!method initialize(thread_id:, request_options: {})
-
# @param thread_id [String]
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
# @see OpenAI::Resources::Beta::Threads::Runs#create
-
#
-
# @see OpenAI::Resources::Beta::Threads::Runs#create_stream_raw
-
1
class RunCreateParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!attribute assistant_id
-
# The ID of the
-
# [assistant](https://platform.openai.com/docs/api-reference/assistants) to use to
-
# execute this run.
-
#
-
# @return [String]
-
1
required :assistant_id, String
-
-
# @!attribute include
-
# A list of additional fields to include in the response. Currently the only
-
# supported value is `step_details.tool_calls[*].file_search.results[*].content`
-
# to fetch the file search result content.
-
#
-
# See the
-
# [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
-
# for more information.
-
#
-
# @return [Array<Symbol, OpenAI::Models::Beta::Threads::Runs::RunStepInclude>, nil]
-
1
optional :include,
-
-> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Beta::Threads::Runs::RunStepInclude] }
-
-
# @!attribute additional_instructions
-
# Appends additional instructions at the end of the instructions for the run. This
-
# is useful for modifying the behavior on a per-run basis without overriding other
-
# instructions.
-
#
-
# @return [String, nil]
-
1
optional :additional_instructions, String, nil?: true
-
-
# @!attribute additional_messages
-
# Adds additional messages to the thread before creating the run.
-
#
-
# @return [Array<OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage>, nil]
-
1
optional :additional_messages,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage]
-
},
-
nil?: true
-
-
# @!attribute instructions
-
# Overrides the
-
# [instructions](https://platform.openai.com/docs/api-reference/assistants/createAssistant)
-
# of the assistant. This is useful for modifying the behavior on a per-run basis.
-
#
-
# @return [String, nil]
-
1
optional :instructions, String, nil?: true
-
-
# @!attribute max_completion_tokens
-
# The maximum number of completion tokens that may be used over the course of the
-
# run. The run will make a best effort to use only the number of completion tokens
-
# specified, across multiple turns of the run. If the run exceeds the number of
-
# completion tokens specified, the run will end with status `incomplete`. See
-
# `incomplete_details` for more info.
-
#
-
# @return [Integer, nil]
-
1
optional :max_completion_tokens, Integer, nil?: true
-
-
# @!attribute max_prompt_tokens
-
# The maximum number of prompt tokens that may be used over the course of the run.
-
# The run will make a best effort to use only the number of prompt tokens
-
# specified, across multiple turns of the run. If the run exceeds the number of
-
# prompt tokens specified, the run will end with status `incomplete`. See
-
# `incomplete_details` for more info.
-
#
-
# @return [Integer, nil]
-
1
optional :max_prompt_tokens, Integer, nil?: true
-
-
# @!attribute metadata
-
# Set of 16 key-value pairs that can be attached to an object. This can be useful
-
# for storing additional information about the object in a structured format, and
-
# querying for objects via API or the dashboard.
-
#
-
# Keys are strings with a maximum length of 64 characters. Values are strings with
-
# a maximum length of 512 characters.
-
#
-
# @return [Hash{Symbol=>String}, nil]
-
1
optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
-
-
# @!attribute model
-
# The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
-
# be used to execute this run. If a value is provided here, it will override the
-
# model associated with the assistant. If not, the model associated with the
-
# assistant will be used.
-
#
-
# @return [String, Symbol, OpenAI::Models::ChatModel, nil]
-
1
optional :model, union: -> { OpenAI::Beta::Threads::RunCreateParams::Model }, nil?: true
-
-
# @!attribute parallel_tool_calls
-
# Whether to enable
-
# [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
-
# during tool use.
-
#
-
# @return [Boolean, nil]
-
1
optional :parallel_tool_calls, OpenAI::Internal::Type::Boolean
-
-
# @!attribute reasoning_effort
-
# **o-series models only**
-
#
-
# Constrains effort on reasoning for
-
# [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-
# supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
-
# result in faster responses and fewer tokens used on reasoning in a response.
-
#
-
# @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
-
1
optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
-
-
# @!attribute response_format
-
# Specifies the format that the model must output. Compatible with
-
# [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
-
# [GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
-
# and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
-
#
-
# Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
-
# Outputs which ensures the model will match your supplied JSON schema. Learn more
-
# in the
-
# [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
-
#
-
# Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
-
# message the model generates is valid JSON.
-
#
-
# **Important:** when using JSON mode, you **must** also instruct the model to
-
# produce JSON yourself via a system or user message. Without this, the model may
-
# generate an unending stream of whitespace until the generation reaches the token
-
# limit, resulting in a long-running and seemingly "stuck" request. Also note that
-
# the message content may be partially cut off if `finish_reason="length"`, which
-
# indicates the generation exceeded `max_tokens` or the conversation exceeded the
-
# max context length.
-
#
-
# @return [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil]
-
1
optional :response_format, union: -> { OpenAI::Beta::AssistantResponseFormatOption }, nil?: true
-
-
# @!attribute temperature
-
# What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
-
# make the output more random, while lower values like 0.2 will make it more
-
# focused and deterministic.
-
#
-
# @return [Float, nil]
-
1
optional :temperature, Float, nil?: true
-
-
# @!attribute tool_choice
-
# Controls which (if any) tool is called by the model. `none` means the model will
-
# not call any tools and instead generates a message. `auto` is the default value
-
# and means the model can pick between generating a message or calling one or more
-
# tools. `required` means the model must call one or more tools before responding
-
# to the user. Specifying a particular tool like `{"type": "file_search"}` or
-
# `{"type": "function", "function": {"name": "my_function"}}` forces the model to
-
# call that tool.
-
#
-
# @return [Symbol, OpenAI::Models::Beta::AssistantToolChoiceOption::Auto, OpenAI::Models::Beta::AssistantToolChoice, nil]
-
1
optional :tool_choice, union: -> { OpenAI::Beta::AssistantToolChoiceOption }, nil?: true
-
-
# @!attribute tools
-
# Override the tools the assistant can use for this run. This is useful for
-
# modifying the behavior on a per-run basis.
-
#
-
# @return [Array<OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::FileSearchTool, OpenAI::Models::Beta::FunctionTool>, nil]
-
1
optional :tools,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::AssistantTool]
-
},
-
nil?: true
-
-
# @!attribute top_p
-
# An alternative to sampling with temperature, called nucleus sampling, where the
-
# model considers the results of the tokens with top_p probability mass. So 0.1
-
# means only the tokens comprising the top 10% probability mass are considered.
-
#
-
# We generally recommend altering this or temperature but not both.
-
#
-
# @return [Float, nil]
-
1
optional :top_p, Float, nil?: true
-
-
# @!attribute truncation_strategy
-
# Controls for how a thread will be truncated prior to the run. Use this to
-
# control the intial context window of the run.
-
#
-
# @return [OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy, nil]
-
1
optional :truncation_strategy,
-
-> { OpenAI::Beta::Threads::RunCreateParams::TruncationStrategy },
-
nil?: true
-
-
# @!method initialize(assistant_id:, include: nil, additional_instructions: nil, additional_messages: nil, instructions: nil, max_completion_tokens: nil, max_prompt_tokens: nil, metadata: nil, model: nil, parallel_tool_calls: nil, reasoning_effort: nil, response_format: nil, temperature: nil, tool_choice: nil, tools: nil, top_p: nil, truncation_strategy: nil, request_options: {})
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::Threads::RunCreateParams} for more details.
-
#
-
# @param assistant_id [String] The ID of the [assistant](https://platform.openai.com/docs/api-reference/assista
-
#
-
# @param include [Array<Symbol, OpenAI::Models::Beta::Threads::Runs::RunStepInclude>] A list of additional fields to include in the response. Currently the only suppo
-
#
-
# @param additional_instructions [String, nil] Appends additional instructions at the end of the instructions for the run. This
-
#
-
# @param additional_messages [Array<OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage>, nil] Adds additional messages to the thread before creating the run.
-
#
-
# @param instructions [String, nil] Overrides the [instructions](https://platform.openai.com/docs/api-reference/assi
-
#
-
# @param max_completion_tokens [Integer, nil] The maximum number of completion tokens that may be used over the course of the
-
#
-
# @param max_prompt_tokens [Integer, nil] The maximum number of prompt tokens that may be used over the course of the run.
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
#
-
# @param model [String, Symbol, OpenAI::Models::ChatModel, nil] The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
-
#
-
# @param parallel_tool_calls [Boolean] Whether to enable [parallel function calling](https://platform.openai.com/docs/g
-
#
-
# @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] **o-series models only**
-
#
-
# @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] Specifies the format that the model must output. Compatible with [GPT-4o](https:
-
#
-
# @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
-
#
-
# @param tool_choice [Symbol, OpenAI::Models::Beta::AssistantToolChoiceOption::Auto, OpenAI::Models::Beta::AssistantToolChoice, nil] Controls which (if any) tool is called by the model.
-
#
-
# @param tools [Array<OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::FileSearchTool, OpenAI::Models::Beta::FunctionTool>, nil] Override the tools the assistant can use for this run. This is useful for modify
-
#
-
# @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling, where the
-
#
-
# @param truncation_strategy [OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy, nil] Controls for how a thread will be truncated prior to the run. Use this to contro
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
-
1
class AdditionalMessage < OpenAI::Internal::Type::BaseModel
-
# @!attribute content
-
# The text contents of the message.
-
#
-
# @return [String, Array<OpenAI::Models::Beta::Threads::ImageFileContentBlock, OpenAI::Models::Beta::Threads::ImageURLContentBlock, OpenAI::Models::Beta::Threads::TextContentBlockParam>]
-
1
required :content,
-
union: -> {
-
OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage::Content
-
}
-
-
# @!attribute role
-
# The role of the entity that is creating the message. Allowed values include:
-
#
-
# - `user`: Indicates the message is sent by an actual user and should be used in
-
# most cases to represent user-generated messages.
-
# - `assistant`: Indicates the message is generated by the assistant. Use this
-
# value to insert messages from the assistant into the conversation.
-
#
-
# @return [Symbol, OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Role]
-
1
required :role, enum: -> { OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage::Role }
-
-
# @!attribute attachments
-
# A list of files attached to the message, and the tools they should be added to.
-
#
-
# @return [Array<OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment>, nil]
-
1
optional :attachments,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment]
-
},
-
nil?: true
-
-
# @!attribute metadata
-
# Set of 16 key-value pairs that can be attached to an object. This can be useful
-
# for storing additional information about the object in a structured format, and
-
# querying for objects via API or the dashboard.
-
#
-
# Keys are strings with a maximum length of 64 characters. Values are strings with
-
# a maximum length of 512 characters.
-
#
-
# @return [Hash{Symbol=>String}, nil]
-
1
optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
-
-
# @!method initialize(content:, role:, attachments: nil, metadata: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage} for more
-
# details.
-
#
-
# @param content [String, Array<OpenAI::Models::Beta::Threads::ImageFileContentBlock, OpenAI::Models::Beta::Threads::ImageURLContentBlock, OpenAI::Models::Beta::Threads::TextContentBlockParam>] The text contents of the message.
-
#
-
# @param role [Symbol, OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Role] The role of the entity that is creating the message. Allowed values include:
-
#
-
# @param attachments [Array<OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment>, nil] A list of files attached to the message, and the tools they should be added to.
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
-
# The text contents of the message.
-
#
-
# @see OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage#content
-
1
module Content
-
1
extend OpenAI::Internal::Type::Union
-
-
# The text contents of the message.
-
1
variant String
-
-
# An array of content parts with a defined type, each can be of type `text` or images can be passed with `image_url` or `image_file`. Image types are only supported on [Vision-compatible models](https://platform.openai.com/docs/models).
-
1
variant -> { OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Content::MessageContentPartParamArray }
-
-
# @!method self.variants
-
# @return [Array(String, Array<OpenAI::Models::Beta::Threads::ImageFileContentBlock, OpenAI::Models::Beta::Threads::ImageURLContentBlock, OpenAI::Models::Beta::Threads::TextContentBlockParam>)]
-
-
# @type [OpenAI::Internal::Type::Converter]
-
MessageContentPartParamArray =
-
1
OpenAI::Internal::Type::ArrayOf[union: -> { OpenAI::Beta::Threads::MessageContentPartParam }]
-
end
-
-
# The role of the entity that is creating the message. Allowed values include:
-
#
-
# - `user`: Indicates the message is sent by an actual user and should be used in
-
# most cases to represent user-generated messages.
-
# - `assistant`: Indicates the message is generated by the assistant. Use this
-
# value to insert messages from the assistant into the conversation.
-
#
-
# @see OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage#role
-
1
module Role
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
USER = :user
-
1
ASSISTANT = :assistant
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
1
class Attachment < OpenAI::Internal::Type::BaseModel
-
# @!attribute file_id
-
# The ID of the file to attach to the message.
-
#
-
# @return [String, nil]
-
1
optional :file_id, String
-
-
# @!attribute tools
-
# The tools to add this file to.
-
#
-
# @return [Array<OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::Tool::FileSearch>, nil]
-
1
optional :tools,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::Tool]
-
}
-
-
# @!method initialize(file_id: nil, tools: nil)
-
# @param file_id [String] The ID of the file to attach to the message.
-
#
-
# @param tools [Array<OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::Tool::FileSearch>] The tools to add this file to.
-
-
1
module Tool
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
1
variant :code_interpreter, -> { OpenAI::Beta::CodeInterpreterTool }
-
-
1
variant :file_search,
-
-> {
-
OpenAI::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::Tool::FileSearch
-
}
-
-
1
class FileSearch < OpenAI::Internal::Type::BaseModel
-
# @!attribute type
-
# The type of tool being defined: `file_search`
-
#
-
# @return [Symbol, :file_search]
-
1
required :type, const: :file_search
-
-
# @!method initialize(type: :file_search)
-
# @param type [Symbol, :file_search] The type of tool being defined: `file_search`
-
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::Threads::RunCreateParams::AdditionalMessage::Attachment::Tool::FileSearch)]
-
end
-
end
-
end
-
-
# The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
-
# be used to execute this run. If a value is provided here, it will override the
-
# model associated with the assistant. If not, the model associated with the
-
# assistant will be used.
-
1
module Model
-
1
extend OpenAI::Internal::Type::Union
-
-
1
variant String
-
-
# The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used.
-
1
variant enum: -> { OpenAI::ChatModel }
-
-
# @!method self.variants
-
# @return [Array(String, Symbol, OpenAI::Models::ChatModel)]
-
end
-
-
1
class TruncationStrategy < OpenAI::Internal::Type::BaseModel
-
# @!attribute type
-
# The truncation strategy to use for the thread. The default is `auto`. If set to
-
# `last_messages`, the thread will be truncated to the n most recent messages in
-
# the thread. When set to `auto`, messages in the middle of the thread will be
-
# dropped to fit the context length of the model, `max_prompt_tokens`.
-
#
-
# @return [Symbol, OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy::Type]
-
1
required :type, enum: -> { OpenAI::Beta::Threads::RunCreateParams::TruncationStrategy::Type }
-
-
# @!attribute last_messages
-
# The number of most recent messages from the thread when constructing the context
-
# for the run.
-
#
-
# @return [Integer, nil]
-
1
optional :last_messages, Integer, nil?: true
-
-
# @!method initialize(type:, last_messages: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy} for more
-
# details.
-
#
-
# Controls for how a thread will be truncated prior to the run. Use this to
-
# control the intial context window of the run.
-
#
-
# @param type [Symbol, OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy::Type] The truncation strategy to use for the thread. The default is `auto`. If set to
-
#
-
# @param last_messages [Integer, nil] The number of most recent messages from the thread when constructing the context
-
-
# The truncation strategy to use for the thread. The default is `auto`. If set to
-
# `last_messages`, the thread will be truncated to the n most recent messages in
-
# the thread. When set to `auto`, messages in the middle of the thread will be
-
# dropped to fit the context length of the model, `max_prompt_tokens`.
-
#
-
# @see OpenAI::Models::Beta::Threads::RunCreateParams::TruncationStrategy#type
-
1
module Type
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
AUTO = :auto
-
1
LAST_MESSAGES = :last_messages
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
# @see OpenAI::Resources::Beta::Threads::Runs#list
-
1
class RunListParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!attribute after
-
# A cursor for use in pagination. `after` is an object ID that defines your place
-
# in the list. For instance, if you make a list request and receive 100 objects,
-
# ending with obj_foo, your subsequent call can include after=obj_foo in order to
-
# fetch the next page of the list.
-
#
-
# @return [String, nil]
-
1
optional :after, String
-
-
# @!attribute before
-
# A cursor for use in pagination. `before` is an object ID that defines your place
-
# in the list. For instance, if you make a list request and receive 100 objects,
-
# starting with obj_foo, your subsequent call can include before=obj_foo in order
-
# to fetch the previous page of the list.
-
#
-
# @return [String, nil]
-
1
optional :before, String
-
-
# @!attribute limit
-
# A limit on the number of objects to be returned. Limit can range between 1 and
-
# 100, and the default is 20.
-
#
-
# @return [Integer, nil]
-
1
optional :limit, Integer
-
-
# @!attribute order
-
# Sort order by the `created_at` timestamp of the objects. `asc` for ascending
-
# order and `desc` for descending order.
-
#
-
# @return [Symbol, OpenAI::Models::Beta::Threads::RunListParams::Order, nil]
-
1
optional :order, enum: -> { OpenAI::Beta::Threads::RunListParams::Order }
-
-
# @!method initialize(after: nil, before: nil, limit: nil, order: nil, request_options: {})
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::Threads::RunListParams} for more details.
-
#
-
# @param after [String] A cursor for use in pagination. `after` is an object ID that defines your place
-
#
-
# @param before [String] A cursor for use in pagination. `before` is an object ID that defines your place
-
#
-
# @param limit [Integer] A limit on the number of objects to be returned. Limit can range between 1 and 1
-
#
-
# @param order [Symbol, OpenAI::Models::Beta::Threads::RunListParams::Order] Sort order by the `created_at` timestamp of the objects. `asc` for ascending ord
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
-
# Sort order by the `created_at` timestamp of the objects. `asc` for ascending
-
# order and `desc` for descending order.
-
1
module Order
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
ASC = :asc
-
1
DESC = :desc
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Beta
      module Threads
        # Request parameters for retrieving a single run; the run ID itself is
        # supplied in the request path, so only the owning thread ID is modeled here.
        #
        # @see OpenAI::Resources::Beta::Threads::Runs#retrieve
        class RunRetrieveParams < OpenAI::Internal::Type::BaseModel
          extend OpenAI::Internal::Type::RequestParameters::Converter
          include OpenAI::Internal::Type::RequestParameters

          # @!attribute thread_id
          #
          # @return [String]
          required :thread_id, String

          # @!method initialize(thread_id:, request_options: {})
          # @param thread_id [String]
          # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
        end
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Beta
      module Threads
        # The status of the run, which can be either `queued`, `in_progress`,
        # `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`,
        # `incomplete`, or `expired`.
        module RunStatus
          extend OpenAI::Internal::Type::Enum

          QUEUED = :queued
          IN_PROGRESS = :in_progress
          REQUIRES_ACTION = :requires_action
          CANCELLING = :cancelling
          CANCELLED = :cancelled
          FAILED = :failed
          COMPLETED = :completed
          INCOMPLETE = :incomplete
          EXPIRED = :expired

          # @!method self.values
          # @return [Array<Symbol>]
        end
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Beta
      module Threads
        # Request parameters for submitting tool outputs back to a run that is
        # waiting on `requires_action`.
        #
        # @see OpenAI::Resources::Beta::Threads::Runs#submit_tool_outputs
        #
        # @see OpenAI::Resources::Beta::Threads::Runs#submit_tool_outputs_stream_raw
        class RunSubmitToolOutputsParams < OpenAI::Internal::Type::BaseModel
          extend OpenAI::Internal::Type::RequestParameters::Converter
          include OpenAI::Internal::Type::RequestParameters

          # @!attribute thread_id
          #
          # @return [String]
          required :thread_id, String

          # @!attribute tool_outputs
          # A list of tools for which the outputs are being submitted.
          #
          # @return [Array<OpenAI::Models::Beta::Threads::RunSubmitToolOutputsParams::ToolOutput>]
          required :tool_outputs,
                   -> {
                     OpenAI::Internal::Type::ArrayOf[OpenAI::Beta::Threads::RunSubmitToolOutputsParams::ToolOutput]
                   }

          # @!method initialize(thread_id:, tool_outputs:, request_options: {})
          # @param thread_id [String]
          #
          # @param tool_outputs [Array<OpenAI::Models::Beta::Threads::RunSubmitToolOutputsParams::ToolOutput>] A list of tools for which the outputs are being submitted.
          #
          # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]

          # A single tool-call result: the output text paired with the ID of the
          # tool call it answers.
          class ToolOutput < OpenAI::Internal::Type::BaseModel
            # @!attribute output
            # The output of the tool call to be submitted to continue the run.
            #
            # @return [String, nil]
            optional :output, String

            # @!attribute tool_call_id
            # The ID of the tool call in the `required_action` object within the run object
            # the output is being submitted for.
            #
            # @return [String, nil]
            optional :tool_call_id, String

            # @!method initialize(output: nil, tool_call_id: nil)
            # Some parameter documentations has been truncated, see
            # {OpenAI::Models::Beta::Threads::RunSubmitToolOutputsParams::ToolOutput} for more
            # details.
            #
            # @param output [String] The output of the tool call to be submitted to continue the run.
            #
            # @param tool_call_id [String] The ID of the tool call in the `required_action` object within the run object th
          end
        end
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Beta
      module Threads
        # Request parameters for updating a run; only the `metadata` map is
        # mutable through this endpoint.
        #
        # @see OpenAI::Resources::Beta::Threads::Runs#update
        class RunUpdateParams < OpenAI::Internal::Type::BaseModel
          extend OpenAI::Internal::Type::RequestParameters::Converter
          include OpenAI::Internal::Type::RequestParameters

          # @!attribute thread_id
          #
          # @return [String]
          required :thread_id, String

          # @!attribute metadata
          # Set of 16 key-value pairs that can be attached to an object. This can be useful
          # for storing additional information about the object in a structured format, and
          # querying for objects via API or the dashboard.
          #
          # Keys are strings with a maximum length of 64 characters. Values are strings with
          # a maximum length of 512 characters.
          #
          # @return [Hash{Symbol=>String}, nil]
          optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true

          # @!method initialize(thread_id:, metadata: nil, request_options: {})
          # Some parameter documentations has been truncated, see
          # {OpenAI::Models::Beta::Threads::RunUpdateParams} for more details.
          #
          # @param thread_id [String]
          #
          # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
          #
          # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
        end
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Beta
      module Threads
        module Runs
          # Text output from the Code Interpreter tool call as part of a run step
          # (streaming delta form: only `index` and `type` are required).
          class CodeInterpreterLogs < OpenAI::Internal::Type::BaseModel
            # @!attribute index
            # The index of the output in the outputs array.
            #
            # @return [Integer]
            required :index, Integer

            # @!attribute type
            # Always `logs`.
            #
            # @return [Symbol, :logs]
            required :type, const: :logs

            # @!attribute logs
            # The text output from the Code Interpreter tool call.
            #
            # @return [String, nil]
            optional :logs, String

            # @!method initialize(index:, logs: nil, type: :logs)
            # Text output from the Code Interpreter tool call as part of a run step.
            #
            # @param index [Integer] The index of the output in the outputs array.
            #
            # @param logs [String] The text output from the Code Interpreter tool call.
            #
            # @param type [Symbol, :logs] Always `logs`.
          end
        end
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Beta
      module Threads
        module Runs
          # Image output from the Code Interpreter tool call as part of a run step
          # (streaming delta form: the `image` payload is optional).
          class CodeInterpreterOutputImage < OpenAI::Internal::Type::BaseModel
            # @!attribute index
            # The index of the output in the outputs array.
            #
            # @return [Integer]
            required :index, Integer

            # @!attribute type
            # Always `image`.
            #
            # @return [Symbol, :image]
            required :type, const: :image

            # @!attribute image
            #
            # @return [OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage::Image, nil]
            optional :image, -> { OpenAI::Beta::Threads::Runs::CodeInterpreterOutputImage::Image }

            # @!method initialize(index:, image: nil, type: :image)
            # @param index [Integer] The index of the output in the outputs array.
            #
            # @param image [OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage::Image]
            #
            # @param type [Symbol, :image] Always `image`.

            # @see OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage#image
            class Image < OpenAI::Internal::Type::BaseModel
              # @!attribute file_id
              # The [file](https://platform.openai.com/docs/api-reference/files) ID of the
              # image.
              #
              # @return [String, nil]
              optional :file_id, String

              # @!method initialize(file_id: nil)
              # Some parameter documentations has been truncated, see
              # {OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage::Image} for
              # more details.
              #
              # @param file_id [String] The [file](https://platform.openai.com/docs/api-reference/files) ID of the image
            end
          end
        end
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Beta
      module Threads
        module Runs
          # Details of the Code Interpreter tool call the run step was involved in
          # (complete, non-streaming form: all fields are required).
          class CodeInterpreterToolCall < OpenAI::Internal::Type::BaseModel
            # @!attribute id
            # The ID of the tool call.
            #
            # @return [String]
            required :id, String

            # @!attribute code_interpreter
            # The Code Interpreter tool call definition.
            #
            # @return [OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter]
            required :code_interpreter,
                     -> {
                       OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter
                     }

            # @!attribute type
            # The type of tool call. This is always going to be `code_interpreter` for this
            # type of tool call.
            #
            # @return [Symbol, :code_interpreter]
            required :type, const: :code_interpreter

            # @!method initialize(id:, code_interpreter:, type: :code_interpreter)
            # Some parameter documentations has been truncated, see
            # {OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall} for more details.
            #
            # Details of the Code Interpreter tool call the run step was involved in.
            #
            # @param id [String] The ID of the tool call.
            #
            # @param code_interpreter [OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter] The Code Interpreter tool call definition.
            #
            # @param type [Symbol, :code_interpreter] The type of tool call. This is always going to be `code_interpreter` for this ty

            # @see OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall#code_interpreter
            class CodeInterpreter < OpenAI::Internal::Type::BaseModel
              # @!attribute input
              # The input to the Code Interpreter tool call.
              #
              # @return [String]
              required :input, String

              # @!attribute outputs
              # The outputs from the Code Interpreter tool call. Code Interpreter can output one
              # or more items, including text (`logs`) or images (`image`). Each of these are
              # represented by a different object type.
              #
              # @return [Array<OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Logs, OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image>]
              required :outputs,
                       -> {
                         OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output]
                       }

              # @!method initialize(input:, outputs:)
              # Some parameter documentations has been truncated, see
              # {OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter}
              # for more details.
              #
              # The Code Interpreter tool call definition.
              #
              # @param input [String] The input to the Code Interpreter tool call.
              #
              # @param outputs [Array<OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Logs, OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image>] The outputs from the Code Interpreter tool call. Code Interpreter can output one

              # Text output from the Code Interpreter tool call as part of a run step.
              module Output
                extend OpenAI::Internal::Type::Union

                # Variants are dispatched on the `type` field of the payload.
                discriminator :type

                # Text output from the Code Interpreter tool call as part of a run step.
                variant :logs,
                        -> {
                          OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Logs
                        }

                variant :image,
                        -> {
                          OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image
                        }

                class Logs < OpenAI::Internal::Type::BaseModel
                  # @!attribute logs
                  # The text output from the Code Interpreter tool call.
                  #
                  # @return [String]
                  required :logs, String

                  # @!attribute type
                  # Always `logs`.
                  #
                  # @return [Symbol, :logs]
                  required :type, const: :logs

                  # @!method initialize(logs:, type: :logs)
                  # Text output from the Code Interpreter tool call as part of a run step.
                  #
                  # @param logs [String] The text output from the Code Interpreter tool call.
                  #
                  # @param type [Symbol, :logs] Always `logs`.
                end

                class Image < OpenAI::Internal::Type::BaseModel
                  # @!attribute image
                  #
                  # @return [OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image::Image]
                  required :image,
                           -> {
                             OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image::Image
                           }

                  # @!attribute type
                  # Always `image`.
                  #
                  # @return [Symbol, :image]
                  required :type, const: :image

                  # @!method initialize(image:, type: :image)
                  # @param image [OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image::Image]
                  #
                  # @param type [Symbol, :image] Always `image`.

                  # @see OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image#image
                  class Image < OpenAI::Internal::Type::BaseModel
                    # @!attribute file_id
                    # The [file](https://platform.openai.com/docs/api-reference/files) ID of the
                    # image.
                    #
                    # @return [String]
                    required :file_id, String

                    # @!method initialize(file_id:)
                    # Some parameter documentations has been truncated, see
                    # {OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image::Image}
                    # for more details.
                    #
                    # @param file_id [String] The [file](https://platform.openai.com/docs/api-reference/files) ID of the image
                  end
                end

                # @!method self.variants
                # @return [Array(OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Logs, OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall::CodeInterpreter::Output::Image)]
              end
            end
          end
        end
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Beta
      module Threads
        module Runs
          # Details of the Code Interpreter tool call the run step was involved in
          # (streaming delta form: `id` and `code_interpreter` are optional).
          class CodeInterpreterToolCallDelta < OpenAI::Internal::Type::BaseModel
            # @!attribute index
            # The index of the tool call in the tool calls array.
            #
            # @return [Integer]
            required :index, Integer

            # @!attribute type
            # The type of tool call. This is always going to be `code_interpreter` for this
            # type of tool call.
            #
            # @return [Symbol, :code_interpreter]
            required :type, const: :code_interpreter

            # @!attribute id
            # The ID of the tool call.
            #
            # @return [String, nil]
            optional :id, String

            # @!attribute code_interpreter
            # The Code Interpreter tool call definition.
            #
            # @return [OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter, nil]
            optional :code_interpreter,
                     -> { OpenAI::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter }

            # @!method initialize(index:, id: nil, code_interpreter: nil, type: :code_interpreter)
            # Some parameter documentations has been truncated, see
            # {OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta} for more
            # details.
            #
            # Details of the Code Interpreter tool call the run step was involved in.
            #
            # @param index [Integer] The index of the tool call in the tool calls array.
            #
            # @param id [String] The ID of the tool call.
            #
            # @param code_interpreter [OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter] The Code Interpreter tool call definition.
            #
            # @param type [Symbol, :code_interpreter] The type of tool call. This is always going to be `code_interpreter` for this ty

            # @see OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta#code_interpreter
            class CodeInterpreter < OpenAI::Internal::Type::BaseModel
              # @!attribute input
              # The input to the Code Interpreter tool call.
              #
              # @return [String, nil]
              optional :input, String

              # @!attribute outputs
              # The outputs from the Code Interpreter tool call. Code Interpreter can output one
              # or more items, including text (`logs`) or images (`image`). Each of these are
              # represented by a different object type.
              #
              # @return [Array<OpenAI::Models::Beta::Threads::Runs::CodeInterpreterLogs, OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage>, nil]
              optional :outputs,
                       -> {
                         OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter::Output]
                       }

              # @!method initialize(input: nil, outputs: nil)
              # Some parameter documentations has been truncated, see
              # {OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta::CodeInterpreter}
              # for more details.
              #
              # The Code Interpreter tool call definition.
              #
              # @param input [String] The input to the Code Interpreter tool call.
              #
              # @param outputs [Array<OpenAI::Models::Beta::Threads::Runs::CodeInterpreterLogs, OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage>] The outputs from the Code Interpreter tool call. Code Interpreter can output one

              # Text output from the Code Interpreter tool call as part of a run step.
              module Output
                extend OpenAI::Internal::Type::Union

                # Variants are dispatched on the `type` field of the payload.
                discriminator :type

                # Text output from the Code Interpreter tool call as part of a run step.
                variant :logs, -> { OpenAI::Beta::Threads::Runs::CodeInterpreterLogs }

                variant :image, -> { OpenAI::Beta::Threads::Runs::CodeInterpreterOutputImage }

                # @!method self.variants
                # @return [Array(OpenAI::Models::Beta::Threads::Runs::CodeInterpreterLogs, OpenAI::Models::Beta::Threads::Runs::CodeInterpreterOutputImage)]
              end
            end
          end
        end
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Beta
      module Threads
        module Runs
          # A file-search tool call made by a run step, including the ranking
          # options used and any results returned.
          class FileSearchToolCall < OpenAI::Internal::Type::BaseModel
            # @!attribute id
            # The ID of the tool call object.
            #
            # @return [String]
            required :id, String

            # @!attribute file_search
            # For now, this is always going to be an empty object.
            #
            # @return [OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch]
            required :file_search, -> { OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch }

            # @!attribute type
            # The type of tool call. This is always going to be `file_search` for this type of
            # tool call.
            #
            # @return [Symbol, :file_search]
            required :type, const: :file_search

            # @!method initialize(id:, file_search:, type: :file_search)
            # Some parameter documentations has been truncated, see
            # {OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall} for more details.
            #
            # @param id [String] The ID of the tool call object.
            #
            # @param file_search [OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch] For now, this is always going to be an empty object.
            #
            # @param type [Symbol, :file_search] The type of tool call. This is always going to be `file_search` for this type of

            # @see OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall#file_search
            class FileSearch < OpenAI::Internal::Type::BaseModel
              # @!attribute ranking_options
              # The ranking options for the file search.
              #
              # @return [OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions, nil]
              optional :ranking_options,
                       -> { OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions }

              # @!attribute results
              # The results of the file search.
              #
              # @return [Array<OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result>, nil]
              optional :results,
                       -> {
                         OpenAI::Internal::Type::ArrayOf[OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result]
                       }

              # @!method initialize(ranking_options: nil, results: nil)
              # For now, this is always going to be an empty object.
              #
              # @param ranking_options [OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions] The ranking options for the file search.
              #
              # @param results [Array<OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result>] The results of the file search.

              # @see OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch#ranking_options
              class RankingOptions < OpenAI::Internal::Type::BaseModel
                # @!attribute ranker
                # The ranker to use for the file search. If not specified will use the `auto`
                # ranker.
                #
                # @return [Symbol, OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions::Ranker]
                required :ranker,
                         enum: -> {
                           OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions::Ranker
                         }

                # @!attribute score_threshold
                # The score threshold for the file search. All values must be a floating point
                # number between 0 and 1.
                #
                # @return [Float]
                required :score_threshold, Float

                # @!method initialize(ranker:, score_threshold:)
                # Some parameter documentations has been truncated, see
                # {OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions}
                # for more details.
                #
                # The ranking options for the file search.
                #
                # @param ranker [Symbol, OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions::Ranker] The ranker to use for the file search. If not specified will use the `auto` rank
                #
                # @param score_threshold [Float] The score threshold for the file search. All values must be a floating point num

                # The ranker to use for the file search. If not specified will use the `auto`
                # ranker.
                #
                # @see OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::RankingOptions#ranker
                module Ranker
                  extend OpenAI::Internal::Type::Enum

                  AUTO = :auto
                  DEFAULT_2024_08_21 = :default_2024_08_21

                  # @!method self.values
                  # @return [Array<Symbol>]
                end
              end

              # A result instance of the file search.
              class Result < OpenAI::Internal::Type::BaseModel
                # @!attribute file_id
                # The ID of the file that result was found in.
                #
                # @return [String]
                required :file_id, String

                # @!attribute file_name
                # The name of the file that result was found in.
                #
                # @return [String]
                required :file_name, String

                # @!attribute score
                # The score of the result. All values must be a floating point number between 0
                # and 1.
                #
                # @return [Float]
                required :score, Float

                # @!attribute content
                # The content of the result that was found. The content is only included if
                # requested via the include query parameter.
                #
                # @return [Array<OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content>, nil]
                optional :content,
                         -> {
                           OpenAI::Internal::Type::ArrayOf[OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content]
                         }

                # @!method initialize(file_id:, file_name:, score:, content: nil)
                # Some parameter documentations has been truncated, see
                # {OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result}
                # for more details.
                #
                # A result instance of the file search.
                #
                # @param file_id [String] The ID of the file that result was found in.
                #
                # @param file_name [String] The name of the file that result was found in.
                #
                # @param score [Float] The score of the result. All values must be a floating point number between 0 an
                #
                # @param content [Array<OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content>] The content of the result that was found. The content is only included if reques

                class Content < OpenAI::Internal::Type::BaseModel
                  # @!attribute text
                  # The text content of the file.
                  #
                  # @return [String, nil]
                  optional :text, String

                  # @!attribute type
                  # The type of the content.
                  #
                  # @return [Symbol, OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content::Type, nil]
                  optional :type,
                           enum: -> {
                             OpenAI::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content::Type
                           }

                  # @!method initialize(text: nil, type: nil)
                  # @param text [String] The text content of the file.
                  #
                  # @param type [Symbol, OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content::Type] The type of the content.

                  # The type of the content.
                  #
                  # @see OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall::FileSearch::Result::Content#type
                  module Type
                    extend OpenAI::Internal::Type::Enum

                    TEXT = :text

                    # @!method self.values
                    # @return [Array<Symbol>]
                  end
                end
              end
            end
          end
        end
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Beta
      module Threads
        module Runs
          # A file-search tool call as part of a streaming run step delta
          # (`id` is optional; `file_search` is an opaque empty object).
          class FileSearchToolCallDelta < OpenAI::Internal::Type::BaseModel
            # @!attribute file_search
            # For now, this is always going to be an empty object.
            #
            # @return [Object]
            required :file_search, OpenAI::Internal::Type::Unknown

            # @!attribute index
            # The index of the tool call in the tool calls array.
            #
            # @return [Integer]
            required :index, Integer

            # @!attribute type
            # The type of tool call. This is always going to be `file_search` for this type of
            # tool call.
            #
            # @return [Symbol, :file_search]
            required :type, const: :file_search

            # @!attribute id
            # The ID of the tool call object.
            #
            # @return [String, nil]
            optional :id, String

            # @!method initialize(file_search:, index:, id: nil, type: :file_search)
            # Some parameter documentations has been truncated, see
            # {OpenAI::Models::Beta::Threads::Runs::FileSearchToolCallDelta} for more details.
            #
            # @param file_search [Object] For now, this is always going to be an empty object.
            #
            # @param index [Integer] The index of the tool call in the tool calls array.
            #
            # @param id [String] The ID of the tool call object.
            #
            # @param type [Symbol, :file_search] The type of tool call. This is always going to be `file_search` for this type of
          end
        end
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Beta
      module Threads
        module Runs
          # A function tool call made by a run step (complete, non-streaming form).
          class FunctionToolCall < OpenAI::Internal::Type::BaseModel
            # @!attribute id
            # The ID of the tool call object.
            #
            # @return [String]
            required :id, String

            # @!attribute function
            # The definition of the function that was called.
            #
            # @return [OpenAI::Models::Beta::Threads::Runs::FunctionToolCall::Function]
            required :function, -> { OpenAI::Beta::Threads::Runs::FunctionToolCall::Function }

            # @!attribute type
            # The type of tool call. This is always going to be `function` for this type of
            # tool call.
            #
            # @return [Symbol, :function]
            required :type, const: :function

            # @!method initialize(id:, function:, type: :function)
            # Some parameter documentations has been truncated, see
            # {OpenAI::Models::Beta::Threads::Runs::FunctionToolCall} for more details.
            #
            # @param id [String] The ID of the tool call object.
            #
            # @param function [OpenAI::Models::Beta::Threads::Runs::FunctionToolCall::Function] The definition of the function that was called.
            #
            # @param type [Symbol, :function] The type of tool call. This is always going to be `function` for this type of to

            # @see OpenAI::Models::Beta::Threads::Runs::FunctionToolCall#function
            class Function < OpenAI::Internal::Type::BaseModel
              # @!attribute arguments
              # The arguments passed to the function.
              #
              # @return [String]
              required :arguments, String

              # @!attribute name
              # The name of the function.
              #
              # @return [String]
              required :name, String

              # @!attribute output
              # The output of the function. This will be `null` if the outputs have not been
              # [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs)
              # yet.
              #
              # @return [String, nil]
              required :output, String, nil?: true

              # @!method initialize(arguments:, name:, output:)
              # Some parameter documentations has been truncated, see
              # {OpenAI::Models::Beta::Threads::Runs::FunctionToolCall::Function} for more
              # details.
              #
              # The definition of the function that was called.
              #
              # @param arguments [String] The arguments passed to the function.
              #
              # @param name [String] The name of the function.
              #
              # @param output [String, nil] The output of the function. This will be `null` if the outputs have not been [su
            end
          end
        end
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Beta
      module Threads
        module Runs
          # A function tool call as part of a streaming run step delta
          # (`id` and `function` are optional; fields may arrive incrementally).
          class FunctionToolCallDelta < OpenAI::Internal::Type::BaseModel
            # @!attribute index
            # The index of the tool call in the tool calls array.
            #
            # @return [Integer]
            required :index, Integer

            # @!attribute type
            # The type of tool call. This is always going to be `function` for this type of
            # tool call.
            #
            # @return [Symbol, :function]
            required :type, const: :function

            # @!attribute id
            # The ID of the tool call object.
            #
            # @return [String, nil]
            optional :id, String

            # @!attribute function
            # The definition of the function that was called.
            #
            # @return [OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta::Function, nil]
            optional :function, -> { OpenAI::Beta::Threads::Runs::FunctionToolCallDelta::Function }

            # @!method initialize(index:, id: nil, function: nil, type: :function)
            # Some parameter documentations has been truncated, see
            # {OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta} for more details.
            #
            # @param index [Integer] The index of the tool call in the tool calls array.
            #
            # @param id [String] The ID of the tool call object.
            #
            # @param function [OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta::Function] The definition of the function that was called.
            #
            # @param type [Symbol, :function] The type of tool call. This is always going to be `function` for this type of to

            # @see OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta#function
            class Function < OpenAI::Internal::Type::BaseModel
              # @!attribute arguments
              # The arguments passed to the function.
              #
              # @return [String, nil]
              optional :arguments, String

              # @!attribute name
              # The name of the function.
              #
              # @return [String, nil]
              optional :name, String

              # @!attribute output
              # The output of the function. This will be `null` if the outputs have not been
              # [submitted](https://platform.openai.com/docs/api-reference/runs/submitToolOutputs)
              # yet.
              #
              # @return [String, nil]
              optional :output, String, nil?: true

              # @!method initialize(arguments: nil, name: nil, output: nil)
              # Some parameter documentations has been truncated, see
              # {OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta::Function} for more
              # details.
              #
              # The definition of the function that was called.
              #
              # @param arguments [String] The arguments passed to the function.
              #
              # @param name [String] The name of the function.
              #
              # @param output [String, nil] The output of the function. This will be `null` if the outputs have not been [su
            end
          end
        end
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Beta
      module Threads
        module Runs
          # Details of the message creation by the run step: wraps the ID of the
          # message the step produced.
          class MessageCreationStepDetails < OpenAI::Internal::Type::BaseModel
            # @!attribute message_creation
            #
            # @return [OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails::MessageCreation]
            required :message_creation,
                     -> { OpenAI::Beta::Threads::Runs::MessageCreationStepDetails::MessageCreation }

            # @!attribute type
            # Always `message_creation`.
            #
            # @return [Symbol, :message_creation]
            required :type, const: :message_creation

            # @!method initialize(message_creation:, type: :message_creation)
            # Details of the message creation by the run step.
            #
            # @param message_creation [OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails::MessageCreation]
            #
            # @param type [Symbol, :message_creation] Always `message_creation`.

            # @see OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails#message_creation
            class MessageCreation < OpenAI::Internal::Type::BaseModel
              # @!attribute message_id
              # The ID of the message that was created by this run step.
              #
              # @return [String]
              required :message_id, String

              # @!method initialize(message_id:)
              # @param message_id [String] The ID of the message that was created by this run step.
            end
          end
        end
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
1
module Runs
-
# @see OpenAI::Resources::Beta::Threads::Runs::Steps#retrieve
-
1
class RunStep < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The identifier of the run step, which can be referenced in API endpoints.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute assistant_id
-
# The ID of the
-
# [assistant](https://platform.openai.com/docs/api-reference/assistants)
-
# associated with the run step.
-
#
-
# @return [String]
-
1
required :assistant_id, String
-
-
# @!attribute cancelled_at
-
# The Unix timestamp (in seconds) for when the run step was cancelled.
-
#
-
# @return [Integer, nil]
-
1
required :cancelled_at, Integer, nil?: true
-
-
# @!attribute completed_at
-
# The Unix timestamp (in seconds) for when the run step completed.
-
#
-
# @return [Integer, nil]
-
1
required :completed_at, Integer, nil?: true
-
-
# @!attribute created_at
-
# The Unix timestamp (in seconds) for when the run step was created.
-
#
-
# @return [Integer]
-
1
required :created_at, Integer
-
-
# @!attribute expired_at
-
# The Unix timestamp (in seconds) for when the run step expired. A step is
-
# considered expired if the parent run is expired.
-
#
-
# @return [Integer, nil]
-
1
required :expired_at, Integer, nil?: true
-
-
# @!attribute failed_at
-
# The Unix timestamp (in seconds) for when the run step failed.
-
#
-
# @return [Integer, nil]
-
1
required :failed_at, Integer, nil?: true
-
-
# @!attribute last_error
-
# The last error associated with this run step. Will be `null` if there are no
-
# errors.
-
#
-
# @return [OpenAI::Models::Beta::Threads::Runs::RunStep::LastError, nil]
-
1
required :last_error, -> { OpenAI::Beta::Threads::Runs::RunStep::LastError }, nil?: true
-
-
# @!attribute metadata
-
# Set of 16 key-value pairs that can be attached to an object. This can be useful
-
# for storing additional information about the object in a structured format, and
-
# querying for objects via API or the dashboard.
-
#
-
# Keys are strings with a maximum length of 64 characters. Values are strings with
-
# a maximum length of 512 characters.
-
#
-
# @return [Hash{Symbol=>String}, nil]
-
1
required :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
-
-
# @!attribute object
-
# The object type, which is always `thread.run.step`.
-
#
-
# @return [Symbol, :"thread.run.step"]
-
1
required :object, const: :"thread.run.step"
-
-
# @!attribute run_id
-
# The ID of the [run](https://platform.openai.com/docs/api-reference/runs) that
-
# this run step is a part of.
-
#
-
# @return [String]
-
1
required :run_id, String
-
-
# @!attribute status
-
# The status of the run step, which can be either `in_progress`, `cancelled`,
-
# `failed`, `completed`, or `expired`.
-
#
-
# @return [Symbol, OpenAI::Models::Beta::Threads::Runs::RunStep::Status]
-
1
required :status, enum: -> { OpenAI::Beta::Threads::Runs::RunStep::Status }
-
-
# @!attribute step_details
-
# The details of the run step.
-
#
-
# @return [OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails, OpenAI::Models::Beta::Threads::Runs::ToolCallsStepDetails]
-
1
required :step_details, union: -> { OpenAI::Beta::Threads::Runs::RunStep::StepDetails }
-
-
# @!attribute thread_id
-
# The ID of the [thread](https://platform.openai.com/docs/api-reference/threads)
-
# that was run.
-
#
-
# @return [String]
-
1
required :thread_id, String
-
-
# @!attribute type
-
# The type of run step, which can be either `message_creation` or `tool_calls`.
-
#
-
# @return [Symbol, OpenAI::Models::Beta::Threads::Runs::RunStep::Type]
-
1
required :type, enum: -> { OpenAI::Beta::Threads::Runs::RunStep::Type }
-
-
# @!attribute usage
-
# Usage statistics related to the run step. This value will be `null` while the
-
# run step's status is `in_progress`.
-
#
-
# @return [OpenAI::Models::Beta::Threads::Runs::RunStep::Usage, nil]
-
1
required :usage, -> { OpenAI::Beta::Threads::Runs::RunStep::Usage }, nil?: true
-
-
# @!method initialize(id:, assistant_id:, cancelled_at:, completed_at:, created_at:, expired_at:, failed_at:, last_error:, metadata:, run_id:, status:, step_details:, thread_id:, type:, usage:, object: :"thread.run.step")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::Threads::Runs::RunStep} for more details.
-
#
-
# Represents a step in execution of a run.
-
#
-
# @param id [String] The identifier of the run step, which can be referenced in API endpoints.
-
#
-
# @param assistant_id [String] The ID of the [assistant](https://platform.openai.com/docs/api-reference/assista
-
#
-
# @param cancelled_at [Integer, nil] The Unix timestamp (in seconds) for when the run step was cancelled.
-
#
-
# @param completed_at [Integer, nil] The Unix timestamp (in seconds) for when the run step completed.
-
#
-
# @param created_at [Integer] The Unix timestamp (in seconds) for when the run step was created.
-
#
-
# @param expired_at [Integer, nil] The Unix timestamp (in seconds) for when the run step expired. A step is conside
-
#
-
# @param failed_at [Integer, nil] The Unix timestamp (in seconds) for when the run step failed.
-
#
-
# @param last_error [OpenAI::Models::Beta::Threads::Runs::RunStep::LastError, nil] The last error associated with this run step. Will be `null` if there are no err
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
#
-
# @param run_id [String] The ID of the [run](https://platform.openai.com/docs/api-reference/runs) that th
-
#
-
# @param status [Symbol, OpenAI::Models::Beta::Threads::Runs::RunStep::Status] The status of the run step, which can be either `in_progress`, `cancelled`, `fai
-
#
-
# @param step_details [OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails, OpenAI::Models::Beta::Threads::Runs::ToolCallsStepDetails] The details of the run step.
-
#
-
# @param thread_id [String] The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) t
-
#
-
# @param type [Symbol, OpenAI::Models::Beta::Threads::Runs::RunStep::Type] The type of run step, which can be either `message_creation` or `tool_calls`.
-
#
-
# @param usage [OpenAI::Models::Beta::Threads::Runs::RunStep::Usage, nil] Usage statistics related to the run step. This value will be `null` while the ru
-
#
-
# @param object [Symbol, :"thread.run.step"] The object type, which is always `thread.run.step`.
-
-
# @see OpenAI::Models::Beta::Threads::Runs::RunStep#last_error
-
1
class LastError < OpenAI::Internal::Type::BaseModel
-
# @!attribute code
-
# One of `server_error` or `rate_limit_exceeded`.
-
#
-
# @return [Symbol, OpenAI::Models::Beta::Threads::Runs::RunStep::LastError::Code]
-
1
required :code, enum: -> { OpenAI::Beta::Threads::Runs::RunStep::LastError::Code }
-
-
# @!attribute message
-
# A human-readable description of the error.
-
#
-
# @return [String]
-
1
required :message, String
-
-
# @!method initialize(code:, message:)
-
# The last error associated with this run step. Will be `null` if there are no
-
# errors.
-
#
-
# @param code [Symbol, OpenAI::Models::Beta::Threads::Runs::RunStep::LastError::Code] One of `server_error` or `rate_limit_exceeded`.
-
#
-
# @param message [String] A human-readable description of the error.
-
-
# One of `server_error` or `rate_limit_exceeded`.
-
#
-
# @see OpenAI::Models::Beta::Threads::Runs::RunStep::LastError#code
-
1
module Code
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
SERVER_ERROR = :server_error
-
1
RATE_LIMIT_EXCEEDED = :rate_limit_exceeded
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
-
# The status of the run step, which can be either `in_progress`, `cancelled`,
-
# `failed`, `completed`, or `expired`.
-
#
-
# @see OpenAI::Models::Beta::Threads::Runs::RunStep#status
-
1
module Status
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
IN_PROGRESS = :in_progress
-
1
CANCELLED = :cancelled
-
1
FAILED = :failed
-
1
COMPLETED = :completed
-
1
EXPIRED = :expired
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
# The details of the run step.
-
#
-
# @see OpenAI::Models::Beta::Threads::Runs::RunStep#step_details
-
1
module StepDetails
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
# Details of the message creation by the run step.
-
1
variant :message_creation, -> { OpenAI::Beta::Threads::Runs::MessageCreationStepDetails }
-
-
# Details of the tool call.
-
1
variant :tool_calls, -> { OpenAI::Beta::Threads::Runs::ToolCallsStepDetails }
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Beta::Threads::Runs::MessageCreationStepDetails, OpenAI::Models::Beta::Threads::Runs::ToolCallsStepDetails)]
-
end
-
-
# The type of run step, which can be either `message_creation` or `tool_calls`.
-
#
-
# @see OpenAI::Models::Beta::Threads::Runs::RunStep#type
-
1
module Type
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
MESSAGE_CREATION = :message_creation
-
1
TOOL_CALLS = :tool_calls
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
# @see OpenAI::Models::Beta::Threads::Runs::RunStep#usage
-
1
class Usage < OpenAI::Internal::Type::BaseModel
-
# @!attribute completion_tokens
-
# Number of completion tokens used over the course of the run step.
-
#
-
# @return [Integer]
-
1
required :completion_tokens, Integer
-
-
# @!attribute prompt_tokens
-
# Number of prompt tokens used over the course of the run step.
-
#
-
# @return [Integer]
-
1
required :prompt_tokens, Integer
-
-
# @!attribute total_tokens
-
# Total number of tokens used (prompt + completion).
-
#
-
# @return [Integer]
-
1
required :total_tokens, Integer
-
-
# @!method initialize(completion_tokens:, prompt_tokens:, total_tokens:)
-
# Usage statistics related to the run step. This value will be `null` while the
-
# run step's status is `in_progress`.
-
#
-
# @param completion_tokens [Integer] Number of completion tokens used over the course of the run step.
-
#
-
# @param prompt_tokens [Integer] Number of prompt tokens used over the course of the run step.
-
#
-
# @param total_tokens [Integer] Total number of tokens used (prompt + completion).
-
end
-
end
-
end
-
-
1
RunStep = Runs::RunStep
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
1
module Runs
-
1
class RunStepDelta < OpenAI::Internal::Type::BaseModel
-
# @!attribute step_details
-
# The details of the run step.
-
#
-
# @return [OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta, OpenAI::Models::Beta::Threads::Runs::ToolCallDeltaObject, nil]
-
1
optional :step_details, union: -> { OpenAI::Beta::Threads::Runs::RunStepDelta::StepDetails }
-
-
# @!method initialize(step_details: nil)
-
# The delta containing the fields that have changed on the run step.
-
#
-
# @param step_details [OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta, OpenAI::Models::Beta::Threads::Runs::ToolCallDeltaObject] The details of the run step.
-
-
# The details of the run step.
-
#
-
# @see OpenAI::Models::Beta::Threads::Runs::RunStepDelta#step_details
-
1
module StepDetails
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
# Details of the message creation by the run step.
-
1
variant :message_creation, -> { OpenAI::Beta::Threads::Runs::RunStepDeltaMessageDelta }
-
-
# Details of the tool call.
-
1
variant :tool_calls, -> { OpenAI::Beta::Threads::Runs::ToolCallDeltaObject }
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta, OpenAI::Models::Beta::Threads::Runs::ToolCallDeltaObject)]
-
end
-
end
-
end
-
-
1
RunStepDelta = Runs::RunStepDelta
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
1
module Runs
-
1
class RunStepDeltaEvent < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The identifier of the run step, which can be referenced in API endpoints.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute delta
-
# The delta containing the fields that have changed on the run step.
-
#
-
# @return [OpenAI::Models::Beta::Threads::Runs::RunStepDelta]
-
1
required :delta, -> { OpenAI::Beta::Threads::Runs::RunStepDelta }
-
-
# @!attribute object
-
# The object type, which is always `thread.run.step.delta`.
-
#
-
# @return [Symbol, :"thread.run.step.delta"]
-
1
required :object, const: :"thread.run.step.delta"
-
-
# @!method initialize(id:, delta:, object: :"thread.run.step.delta")
-
# Represents a run step delta i.e. any changed fields on a run step during
-
# streaming.
-
#
-
# @param id [String] The identifier of the run step, which can be referenced in API endpoints.
-
#
-
# @param delta [OpenAI::Models::Beta::Threads::Runs::RunStepDelta] The delta containing the fields that have changed on the run step.
-
#
-
# @param object [Symbol, :"thread.run.step.delta"] The object type, which is always `thread.run.step.delta`.
-
end
-
end
-
-
1
RunStepDeltaEvent = Runs::RunStepDeltaEvent
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
1
module Runs
-
1
class RunStepDeltaMessageDelta < OpenAI::Internal::Type::BaseModel
-
# @!attribute type
-
# Always `message_creation`.
-
#
-
# @return [Symbol, :message_creation]
-
1
required :type, const: :message_creation
-
-
# @!attribute message_creation
-
#
-
# @return [OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta::MessageCreation, nil]
-
1
optional :message_creation,
-
-> {
-
OpenAI::Beta::Threads::Runs::RunStepDeltaMessageDelta::MessageCreation
-
}
-
-
# @!method initialize(message_creation: nil, type: :message_creation)
-
# Details of the message creation by the run step.
-
#
-
# @param message_creation [OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta::MessageCreation]
-
#
-
# @param type [Symbol, :message_creation] Always `message_creation`.
-
-
# @see OpenAI::Models::Beta::Threads::Runs::RunStepDeltaMessageDelta#message_creation
-
1
class MessageCreation < OpenAI::Internal::Type::BaseModel
-
# @!attribute message_id
-
# The ID of the message that was created by this run step.
-
#
-
# @return [String, nil]
-
1
optional :message_id, String
-
-
# @!method initialize(message_id: nil)
-
# @param message_id [String] The ID of the message that was created by this run step.
-
end
-
end
-
end
-
-
1
RunStepDeltaMessageDelta = Runs::RunStepDeltaMessageDelta
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
1
module Runs
-
1
module RunStepInclude
-
1
extend OpenAI::Internal::Type::Enum
-
-
STEP_DETAILS_TOOL_CALLS_FILE_SEARCH_RESULTS_CONTENT =
-
1
:"step_details.tool_calls[*].file_search.results[*].content"
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
-
1
RunStepInclude = Runs::RunStepInclude
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
1
module Runs
-
# @see OpenAI::Resources::Beta::Threads::Runs::Steps#list
-
1
class StepListParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!attribute thread_id
-
#
-
# @return [String]
-
1
required :thread_id, String
-
-
# @!attribute after
-
# A cursor for use in pagination. `after` is an object ID that defines your place
-
# in the list. For instance, if you make a list request and receive 100 objects,
-
# ending with obj_foo, your subsequent call can include after=obj_foo in order to
-
# fetch the next page of the list.
-
#
-
# @return [String, nil]
-
1
optional :after, String
-
-
# @!attribute before
-
# A cursor for use in pagination. `before` is an object ID that defines your place
-
# in the list. For instance, if you make a list request and receive 100 objects,
-
# starting with obj_foo, your subsequent call can include before=obj_foo in order
-
# to fetch the previous page of the list.
-
#
-
# @return [String, nil]
-
1
optional :before, String
-
-
# @!attribute include
-
# A list of additional fields to include in the response. Currently the only
-
# supported value is `step_details.tool_calls[*].file_search.results[*].content`
-
# to fetch the file search result content.
-
#
-
# See the
-
# [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
-
# for more information.
-
#
-
# @return [Array<Symbol, OpenAI::Models::Beta::Threads::Runs::RunStepInclude>, nil]
-
1
optional :include,
-
-> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Beta::Threads::Runs::RunStepInclude] }
-
-
# @!attribute limit
-
# A limit on the number of objects to be returned. Limit can range between 1 and
-
# 100, and the default is 20.
-
#
-
# @return [Integer, nil]
-
1
optional :limit, Integer
-
-
# @!attribute order
-
# Sort order by the `created_at` timestamp of the objects. `asc` for ascending
-
# order and `desc` for descending order.
-
#
-
# @return [Symbol, OpenAI::Models::Beta::Threads::Runs::StepListParams::Order, nil]
-
1
optional :order, enum: -> { OpenAI::Beta::Threads::Runs::StepListParams::Order }
-
-
# @!method initialize(thread_id:, after: nil, before: nil, include: nil, limit: nil, order: nil, request_options: {})
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::Threads::Runs::StepListParams} for more details.
-
#
-
# @param thread_id [String]
-
#
-
# @param after [String] A cursor for use in pagination. `after` is an object ID that defines your place
-
#
-
# @param before [String] A cursor for use in pagination. `before` is an object ID that defines your place
-
#
-
# @param include [Array<Symbol, OpenAI::Models::Beta::Threads::Runs::RunStepInclude>] A list of additional fields to include in the response. Currently the only suppo
-
#
-
# @param limit [Integer] A limit on the number of objects to be returned. Limit can range between 1 and 1
-
#
-
# @param order [Symbol, OpenAI::Models::Beta::Threads::Runs::StepListParams::Order] Sort order by the `created_at` timestamp of the objects. `asc` for ascending ord
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
-
# Sort order by the `created_at` timestamp of the objects. `asc` for ascending
-
# order and `desc` for descending order.
-
1
module Order
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
ASC = :asc
-
1
DESC = :desc
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
1
module Runs
-
# @see OpenAI::Resources::Beta::Threads::Runs::Steps#retrieve
-
1
class StepRetrieveParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!attribute thread_id
-
#
-
# @return [String]
-
1
required :thread_id, String
-
-
# @!attribute run_id
-
#
-
# @return [String]
-
1
required :run_id, String
-
-
# @!attribute include
-
# A list of additional fields to include in the response. Currently the only
-
# supported value is `step_details.tool_calls[*].file_search.results[*].content`
-
# to fetch the file search result content.
-
#
-
# See the
-
# [file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
-
# for more information.
-
#
-
# @return [Array<Symbol, OpenAI::Models::Beta::Threads::Runs::RunStepInclude>, nil]
-
1
optional :include,
-
-> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Beta::Threads::Runs::RunStepInclude] }
-
-
# @!method initialize(thread_id:, run_id:, include: nil, request_options: {})
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::Threads::Runs::StepRetrieveParams} for more details.
-
#
-
# @param thread_id [String]
-
#
-
# @param run_id [String]
-
#
-
# @param include [Array<Symbol, OpenAI::Models::Beta::Threads::Runs::RunStepInclude>] A list of additional fields to include in the response. Currently the only suppo
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
1
module Runs
-
# Details of the Code Interpreter tool call the run step was involved in.
-
1
module ToolCall
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
# Details of the Code Interpreter tool call the run step was involved in.
-
1
variant :code_interpreter, -> { OpenAI::Beta::Threads::Runs::CodeInterpreterToolCall }
-
-
1
variant :file_search, -> { OpenAI::Beta::Threads::Runs::FileSearchToolCall }
-
-
1
variant :function, -> { OpenAI::Beta::Threads::Runs::FunctionToolCall }
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall, OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall, OpenAI::Models::Beta::Threads::Runs::FunctionToolCall)]
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
1
module Runs
-
# Details of the Code Interpreter tool call the run step was involved in.
-
1
module ToolCallDelta
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
# Details of the Code Interpreter tool call the run step was involved in.
-
1
variant :code_interpreter, -> { OpenAI::Beta::Threads::Runs::CodeInterpreterToolCallDelta }
-
-
1
variant :file_search, -> { OpenAI::Beta::Threads::Runs::FileSearchToolCallDelta }
-
-
1
variant :function, -> { OpenAI::Beta::Threads::Runs::FunctionToolCallDelta }
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta, OpenAI::Models::Beta::Threads::Runs::FileSearchToolCallDelta, OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta)]
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
1
module Runs
-
1
class ToolCallDeltaObject < OpenAI::Internal::Type::BaseModel
-
# @!attribute type
-
# Always `tool_calls`.
-
#
-
# @return [Symbol, :tool_calls]
-
1
required :type, const: :tool_calls
-
-
# @!attribute tool_calls
-
# An array of tool calls the run step was involved in. These can be associated
-
# with one of three types of tools: `code_interpreter`, `file_search`, or
-
# `function`.
-
#
-
# @return [Array<OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta, OpenAI::Models::Beta::Threads::Runs::FileSearchToolCallDelta, OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta>, nil]
-
1
optional :tool_calls,
-
-> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::Threads::Runs::ToolCallDelta] }
-
-
# @!method initialize(tool_calls: nil, type: :tool_calls)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::Threads::Runs::ToolCallDeltaObject} for more details.
-
#
-
# Details of the tool call.
-
#
-
# @param tool_calls [Array<OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCallDelta, OpenAI::Models::Beta::Threads::Runs::FileSearchToolCallDelta, OpenAI::Models::Beta::Threads::Runs::FunctionToolCallDelta>] An array of tool calls the run step was involved in. These can be associated wit
-
#
-
# @param type [Symbol, :tool_calls] Always `tool_calls`.
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
1
module Runs
-
1
class ToolCallsStepDetails < OpenAI::Internal::Type::BaseModel
-
# @!attribute tool_calls
-
# An array of tool calls the run step was involved in. These can be associated
-
# with one of three types of tools: `code_interpreter`, `file_search`, or
-
# `function`.
-
#
-
# @return [Array<OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall, OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall, OpenAI::Models::Beta::Threads::Runs::FunctionToolCall>]
-
1
required :tool_calls,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::Threads::Runs::ToolCall]
-
}
-
-
# @!attribute type
-
# Always `tool_calls`.
-
#
-
# @return [Symbol, :tool_calls]
-
1
required :type, const: :tool_calls
-
-
# @!method initialize(tool_calls:, type: :tool_calls)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Beta::Threads::Runs::ToolCallsStepDetails} for more details.
-
#
-
# Details of the tool call.
-
#
-
# @param tool_calls [Array<OpenAI::Models::Beta::Threads::Runs::CodeInterpreterToolCall, OpenAI::Models::Beta::Threads::Runs::FileSearchToolCall, OpenAI::Models::Beta::Threads::Runs::FunctionToolCall>] An array of tool calls the run step was involved in. These can be associated wit
-
#
-
# @param type [Symbol, :tool_calls] Always `tool_calls`.
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
1
class Text < OpenAI::Internal::Type::BaseModel
-
# @!attribute annotations
-
#
-
# @return [Array<OpenAI::Models::Beta::Threads::FileCitationAnnotation, OpenAI::Models::Beta::Threads::FilePathAnnotation>]
-
1
required :annotations,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::Threads::Annotation]
-
}
-
-
# @!attribute value
-
# The data that makes up the text.
-
#
-
# @return [String]
-
1
required :value, String
-
-
# @!method initialize(annotations:, value:)
-
# @param annotations [Array<OpenAI::Models::Beta::Threads::FileCitationAnnotation, OpenAI::Models::Beta::Threads::FilePathAnnotation>]
-
#
-
# @param value [String] The data that makes up the text.
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
1
class TextContentBlock < OpenAI::Internal::Type::BaseModel
-
# @!attribute text
-
#
-
# @return [OpenAI::Models::Beta::Threads::Text]
-
1
required :text, -> { OpenAI::Beta::Threads::Text }
-
-
# @!attribute type
-
# Always `text`.
-
#
-
# @return [Symbol, :text]
-
1
required :type, const: :text
-
-
# @!method initialize(text:, type: :text)
-
# The text content that is part of a message.
-
#
-
# @param text [OpenAI::Models::Beta::Threads::Text]
-
#
-
# @param type [Symbol, :text] Always `text`.
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
1
class TextContentBlockParam < OpenAI::Internal::Type::BaseModel
-
# @!attribute text
-
# Text content to be sent to the model
-
#
-
# @return [String]
-
1
required :text, String
-
-
# @!attribute type
-
# Always `text`.
-
#
-
# @return [Symbol, :text]
-
1
required :type, const: :text
-
-
# @!method initialize(text:, type: :text)
-
# The text content that is part of a message.
-
#
-
# @param text [String] Text content to be sent to the model
-
#
-
# @param type [Symbol, :text] Always `text`.
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
1
class TextDelta < OpenAI::Internal::Type::BaseModel
-
# @!attribute annotations
-
#
-
# @return [Array<OpenAI::Models::Beta::Threads::FileCitationDeltaAnnotation, OpenAI::Models::Beta::Threads::FilePathDeltaAnnotation>, nil]
-
1
optional :annotations,
-
-> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Beta::Threads::AnnotationDelta] }
-
-
# @!attribute value
-
# The data that makes up the text.
-
#
-
# @return [String, nil]
-
1
optional :value, String
-
-
# @!method initialize(annotations: nil, value: nil)
-
# @param annotations [Array<OpenAI::Models::Beta::Threads::FileCitationDeltaAnnotation, OpenAI::Models::Beta::Threads::FilePathDeltaAnnotation>]
-
#
-
# @param value [String] The data that makes up the text.
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Beta
-
1
module Threads
-
1
class TextDeltaBlock < OpenAI::Internal::Type::BaseModel
-
# @!attribute index
-
# The index of the content part in the message.
-
#
-
# @return [Integer]
-
1
required :index, Integer
-
-
# @!attribute type
-
# Always `text`.
-
#
-
# @return [Symbol, :text]
-
1
required :type, const: :text
-
-
# @!attribute text
-
#
-
# @return [OpenAI::Models::Beta::Threads::TextDelta, nil]
-
1
optional :text, -> { OpenAI::Beta::Threads::TextDelta }
-
-
# @!method initialize(index:, text: nil, type: :text)
-
# The text content that is part of a message.
-
#
-
# @param index [Integer] The index of the content part in the message.
-
#
-
# @param text [OpenAI::Models::Beta::Threads::TextDelta]
-
#
-
# @param type [Symbol, :text] Always `text`.
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Chat
-
# @see OpenAI::Resources::Chat::Completions#create
-
#
-
# @see OpenAI::Resources::Chat::Completions#stream_raw
-
1
class ChatCompletion < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# A unique identifier for the chat completion.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute choices
-
# A list of chat completion choices. Can be more than one if `n` is greater
-
# than 1.
-
#
-
# @return [Array<OpenAI::Models::Chat::ChatCompletion::Choice>]
-
1
required :choices, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletion::Choice] }
-
-
# @!attribute created
-
# The Unix timestamp (in seconds) of when the chat completion was created.
-
#
-
# @return [Integer]
-
1
required :created, Integer
-
-
# @!attribute model
-
# The model used for the chat completion.
-
#
-
# @return [String]
-
1
required :model, String
-
-
# @!attribute object
-
# The object type, which is always `chat.completion`.
-
#
-
# @return [Symbol, :"chat.completion"]
-
1
required :object, const: :"chat.completion"
-
-
# @!attribute service_tier
-
# Specifies the processing type used for serving the request.
-
#
-
# - If set to 'auto', then the request will be processed with the service tier
-
# configured in the Project settings. Unless otherwise configured, the Project
-
# will use 'default'.
-
# - If set to 'default', then the request will be processed with the standard
-
# pricing and performance for the selected model.
-
# - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
-
# 'priority', then the request will be processed with the corresponding service
-
# tier. [Contact sales](https://openai.com/contact-sales) to learn more about
-
# Priority processing.
-
# - When not set, the default behavior is 'auto'.
-
#
-
# When the `service_tier` parameter is set, the response body will include the
-
# `service_tier` value based on the processing mode actually used to serve the
-
# request. This response value may be different from the value set in the
-
# parameter.
-
#
-
# @return [Symbol, OpenAI::Models::Chat::ChatCompletion::ServiceTier, nil]
-
1
optional :service_tier, enum: -> { OpenAI::Chat::ChatCompletion::ServiceTier }, nil?: true
-
-
# @!attribute system_fingerprint
-
# This fingerprint represents the backend configuration that the model runs with.
-
#
-
# Can be used in conjunction with the `seed` request parameter to understand when
-
# backend changes have been made that might impact determinism.
-
#
-
# @return [String, nil]
-
1
optional :system_fingerprint, String
-
-
# @!attribute usage
-
# Usage statistics for the completion request.
-
#
-
# @return [OpenAI::Models::CompletionUsage, nil]
-
1
optional :usage, -> { OpenAI::CompletionUsage }
-
-
# @!method initialize(id:, choices:, created:, model:, service_tier: nil, system_fingerprint: nil, usage: nil, object: :"chat.completion")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Chat::ChatCompletion} for more details.
-
#
-
# Represents a chat completion response returned by model, based on the provided
-
# input.
-
#
-
# @param id [String] A unique identifier for the chat completion.
-
#
-
# @param choices [Array<OpenAI::Models::Chat::ChatCompletion::Choice>] A list of chat completion choices. Can be more than one if `n` is greater than 1
-
#
-
# @param created [Integer] The Unix timestamp (in seconds) of when the chat completion was created.
-
#
-
# @param model [String] The model used for the chat completion.
-
#
-
# @param service_tier [Symbol, OpenAI::Models::Chat::ChatCompletion::ServiceTier, nil] Specifies the processing type used for serving the request.
-
#
-
# @param system_fingerprint [String] This fingerprint represents the backend configuration that the model runs with.
-
#
-
# @param usage [OpenAI::Models::CompletionUsage] Usage statistics for the completion request.
-
#
-
# @param object [Symbol, :"chat.completion"] The object type, which is always `chat.completion`.
-
-
1
class Choice < OpenAI::Internal::Type::BaseModel
  # @!attribute finish_reason
  # The reason the model stopped generating tokens. This will be `stop` if the model
  # hit a natural stop point or a provided stop sequence, `length` if the maximum
  # number of tokens specified in the request was reached, `content_filter` if
  # content was omitted due to a flag from our content filters, `tool_calls` if the
  # model called a tool, or `function_call` (deprecated) if the model called a
  # function.
  #
  # @return [Symbol, OpenAI::Models::Chat::ChatCompletion::Choice::FinishReason]
  required :finish_reason, enum: -> { OpenAI::Chat::ChatCompletion::Choice::FinishReason }

  # @!attribute index
  # The index of the choice in the list of choices.
  #
  # @return [Integer]
  required :index, Integer

  # @!attribute logprobs
  # Log probability information for the choice.
  #
  # @return [OpenAI::Models::Chat::ChatCompletion::Choice::Logprobs, nil]
  required :logprobs, -> { OpenAI::Chat::ChatCompletion::Choice::Logprobs }, nil?: true

  # @!attribute message
  # A chat completion message generated by the model.
  #
  # @return [OpenAI::Models::Chat::ChatCompletionMessage]
  required :message, -> { OpenAI::Chat::ChatCompletionMessage }

  # @!method initialize(finish_reason:, index:, logprobs:, message:)
  # Some parameter documentation has been truncated; see
  # {OpenAI::Models::Chat::ChatCompletion::Choice} for more details.
  #
  # @param finish_reason [Symbol, OpenAI::Models::Chat::ChatCompletion::Choice::FinishReason] The reason the model stopped generating tokens. This will be `stop` if the model
  #
  # @param index [Integer] The index of the choice in the list of choices.
  #
  # @param logprobs [OpenAI::Models::Chat::ChatCompletion::Choice::Logprobs, nil] Log probability information for the choice.
  #
  # @param message [OpenAI::Models::Chat::ChatCompletionMessage] A chat completion message generated by the model.

  # The reason the model stopped generating tokens. This will be `stop` if the model
  # hit a natural stop point or a provided stop sequence, `length` if the maximum
  # number of tokens specified in the request was reached, `content_filter` if
  # content was omitted due to a flag from our content filters, `tool_calls` if the
  # model called a tool, or `function_call` (deprecated) if the model called a
  # function.
  #
  # @see OpenAI::Models::Chat::ChatCompletion::Choice#finish_reason
  module FinishReason
    extend OpenAI::Internal::Type::Enum

    STOP = :stop
    LENGTH = :length
    TOOL_CALLS = :tool_calls
    CONTENT_FILTER = :content_filter
    FUNCTION_CALL = :function_call

    # @!method self.values
    # @return [Array<Symbol>]
  end

  # @see OpenAI::Models::Chat::ChatCompletion::Choice#logprobs
  class Logprobs < OpenAI::Internal::Type::BaseModel
    # @!attribute content
    # A list of message content tokens with log probability information.
    #
    # @return [Array<OpenAI::Models::Chat::ChatCompletionTokenLogprob>, nil]
    required :content,
             -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletionTokenLogprob] },
             nil?: true

    # @!attribute refusal
    # A list of message refusal tokens with log probability information.
    #
    # @return [Array<OpenAI::Models::Chat::ChatCompletionTokenLogprob>, nil]
    required :refusal,
             -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletionTokenLogprob] },
             nil?: true

    # @!method initialize(content:, refusal:)
    # Log probability information for the choice.
    #
    # @param content [Array<OpenAI::Models::Chat::ChatCompletionTokenLogprob>, nil] A list of message content tokens with log probability information.
    #
    # @param refusal [Array<OpenAI::Models::Chat::ChatCompletionTokenLogprob>, nil] A list of message refusal tokens with log probability information.
  end
end
-
-
# Specifies the processing type used for serving the request.
#
# - If set to 'auto', then the request will be processed with the service tier
#   configured in the Project settings. Unless otherwise configured, the Project
#   will use 'default'.
# - If set to 'default', then the request will be processed with the standard
#   pricing and performance for the selected model.
# - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
#   'priority', then the request will be processed with the corresponding service
#   tier. [Contact sales](https://openai.com/contact-sales) to learn more about
#   Priority processing.
# - When not set, the default behavior is 'auto'.
#
# When the `service_tier` parameter is set, the response body will include the
# `service_tier` value based on the processing mode actually used to serve the
# request. This response value may be different from the value set in the
# parameter.
#
# @see OpenAI::Models::Chat::ChatCompletion#service_tier
module ServiceTier
  extend OpenAI::Internal::Type::Enum

  AUTO = :auto
  DEFAULT = :default
  FLEX = :flex
  SCALE = :scale
  PRIORITY = :priority

  # @!method self.values
  # @return [Array<Symbol>]
end
-
end
-
end
-
-
1
ChatCompletion = Chat::ChatCompletion
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Chat
      class ChatCompletionAssistantMessageParam < OpenAI::Internal::Type::BaseModel
        # @!attribute role
        # The role of the messages author, in this case `assistant`.
        #
        # @return [Symbol, :assistant]
        required :role, const: :assistant

        # @!attribute audio
        # Data about a previous audio response from the model.
        # [Learn more](https://platform.openai.com/docs/guides/audio).
        #
        # @return [OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::Audio, nil]
        optional :audio, -> { OpenAI::Chat::ChatCompletionAssistantMessageParam::Audio }, nil?: true

        # @!attribute content
        # The contents of the assistant message. Required unless `tool_calls` or
        # `function_call` is specified.
        #
        # @return [String, Array<OpenAI::Models::Chat::ChatCompletionContentPartText, OpenAI::Models::Chat::ChatCompletionContentPartRefusal>, nil]
        optional :content,
                 union: -> {
                   OpenAI::Chat::ChatCompletionAssistantMessageParam::Content
                 },
                 nil?: true

        # @!attribute function_call
        # @deprecated
        #
        # Deprecated and replaced by `tool_calls`. The name and arguments of a function
        # that should be called, as generated by the model.
        #
        # @return [OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::FunctionCall, nil]
        optional :function_call,
                 -> { OpenAI::Chat::ChatCompletionAssistantMessageParam::FunctionCall },
                 nil?: true

        # @!attribute name
        # An optional name for the participant. Provides the model information to
        # differentiate between participants of the same role.
        #
        # @return [String, nil]
        optional :name, String

        # @!attribute refusal
        # The refusal message by the assistant.
        #
        # @return [String, nil]
        optional :refusal, String, nil?: true

        # @!attribute tool_calls
        # The tool calls generated by the model, such as function calls.
        #
        # @return [Array<OpenAI::Models::Chat::ChatCompletionMessageToolCall>, nil]
        optional :tool_calls,
                 -> {
                   OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletionMessageToolCall]
                 }

        # @!method initialize(audio: nil, content: nil, function_call: nil, name: nil, refusal: nil, tool_calls: nil, role: :assistant)
        # Some parameter documentation has been truncated; see
        # {OpenAI::Models::Chat::ChatCompletionAssistantMessageParam} for more details.
        #
        # Messages sent by the model in response to user messages.
        #
        # @param audio [OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::Audio, nil] Data about a previous audio response from the model.
        #
        # @param content [String, Array<OpenAI::Models::Chat::ChatCompletionContentPartText, OpenAI::Models::Chat::ChatCompletionContentPartRefusal>, nil] The contents of the assistant message. Required unless `tool_calls` or `function
        #
        # @param function_call [OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::FunctionCall, nil] Deprecated and replaced by `tool_calls`. The name and arguments of a function th
        #
        # @param name [String] An optional name for the participant. Provides the model information to differen
        #
        # @param refusal [String, nil] The refusal message by the assistant.
        #
        # @param tool_calls [Array<OpenAI::Models::Chat::ChatCompletionMessageToolCall>] The tool calls generated by the model, such as function calls.
        #
        # @param role [Symbol, :assistant] The role of the messages author, in this case `assistant`.

        # @see OpenAI::Models::Chat::ChatCompletionAssistantMessageParam#audio
        class Audio < OpenAI::Internal::Type::BaseModel
          # @!attribute id
          # Unique identifier for a previous audio response from the model.
          #
          # @return [String]
          required :id, String

          # @!method initialize(id:)
          # Data about a previous audio response from the model.
          # [Learn more](https://platform.openai.com/docs/guides/audio).
          #
          # @param id [String] Unique identifier for a previous audio response from the model.
        end

        # The contents of the assistant message. Required unless `tool_calls` or
        # `function_call` is specified.
        #
        # @see OpenAI::Models::Chat::ChatCompletionAssistantMessageParam#content
        module Content
          extend OpenAI::Internal::Type::Union

          # The contents of the assistant message.
          variant String

          # An array of content parts with a defined type. Can be one or more of type `text`, or exactly one of type `refusal`.
          variant -> { OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::Content::ArrayOfContentPartArray }

          # Learn about
          # [text inputs](https://platform.openai.com/docs/guides/text-generation).
          module ArrayOfContentPart
            extend OpenAI::Internal::Type::Union

            # NOTE: variants are dispatched on the `type` field of the incoming payload.
            discriminator :type

            # Learn about [text inputs](https://platform.openai.com/docs/guides/text-generation).
            variant :text, -> { OpenAI::Chat::ChatCompletionContentPartText }

            variant :refusal, -> { OpenAI::Chat::ChatCompletionContentPartRefusal }

            # @!method self.variants
            # @return [Array(OpenAI::Models::Chat::ChatCompletionContentPartText, OpenAI::Models::Chat::ChatCompletionContentPartRefusal)]
          end

          # @!method self.variants
          # @return [Array(String, Array<OpenAI::Models::Chat::ChatCompletionContentPartText, OpenAI::Models::Chat::ChatCompletionContentPartRefusal>)]

          # @type [OpenAI::Internal::Type::Converter]
          ArrayOfContentPartArray =
            OpenAI::Internal::Type::ArrayOf[union: -> {
              OpenAI::Chat::ChatCompletionAssistantMessageParam::Content::ArrayOfContentPart
            }]
        end

        # @deprecated
        #
        # @see OpenAI::Models::Chat::ChatCompletionAssistantMessageParam#function_call
        class FunctionCall < OpenAI::Internal::Type::BaseModel
          # @!attribute arguments
          # The arguments to call the function with, as generated by the model in JSON
          # format. Note that the model does not always generate valid JSON, and may
          # hallucinate parameters not defined by your function schema. Validate the
          # arguments in your code before calling your function.
          #
          # @return [String]
          required :arguments, String

          # @!attribute name
          # The name of the function to call.
          #
          # @return [String]
          required :name, String

          # @!method initialize(arguments:, name:)
          # Some parameter documentation has been truncated; see
          # {OpenAI::Models::Chat::ChatCompletionAssistantMessageParam::FunctionCall} for
          # more details.
          #
          # Deprecated and replaced by `tool_calls`. The name and arguments of a function
          # that should be called, as generated by the model.
          #
          # @param arguments [String] The arguments to call the function with, as generated by the model in JSON forma
          #
          # @param name [String] The name of the function to call.
        end
      end
    end

    ChatCompletionAssistantMessageParam = Chat::ChatCompletionAssistantMessageParam
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Chat
      class ChatCompletionAudio < OpenAI::Internal::Type::BaseModel
        # @!attribute id
        # Unique identifier for this audio response.
        #
        # @return [String]
        required :id, String

        # @!attribute data
        # Base64 encoded audio bytes generated by the model, in the format specified in
        # the request.
        #
        # @return [String]
        required :data, String

        # @!attribute expires_at
        # The Unix timestamp (in seconds) for when this audio response will no longer be
        # accessible on the server for use in multi-turn conversations.
        #
        # @return [Integer]
        required :expires_at, Integer

        # @!attribute transcript
        # Transcript of the audio generated by the model.
        #
        # @return [String]
        required :transcript, String

        # @!method initialize(id:, data:, expires_at:, transcript:)
        # Some parameter documentation has been truncated; see
        # {OpenAI::Models::Chat::ChatCompletionAudio} for more details.
        #
        # If the audio output modality is requested, this object contains data about the
        # audio response from the model.
        # [Learn more](https://platform.openai.com/docs/guides/audio).
        #
        # @param id [String] Unique identifier for this audio response.
        #
        # @param data [String] Base64 encoded audio bytes generated by the model, in the format
        #
        # @param expires_at [Integer] The Unix timestamp (in seconds) for when this audio response will
        #
        # @param transcript [String] Transcript of the audio generated by the model.
      end
    end

    ChatCompletionAudio = Chat::ChatCompletionAudio
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Chat
      class ChatCompletionAudioParam < OpenAI::Internal::Type::BaseModel
        # @!attribute format_
        # Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`, `opus`,
        # or `pcm16`.
        #
        # @return [Symbol, OpenAI::Models::Chat::ChatCompletionAudioParam::Format]
        required :format_, enum: -> { OpenAI::Chat::ChatCompletionAudioParam::Format }, api_name: :format

        # @!attribute voice
        # The voice the model uses to respond. Supported voices are `alloy`, `ash`,
        # `ballad`, `coral`, `echo`, `fable`, `nova`, `onyx`, `sage`, and `shimmer`.
        #
        # @return [String, Symbol, OpenAI::Models::Chat::ChatCompletionAudioParam::Voice]
        required :voice, union: -> { OpenAI::Chat::ChatCompletionAudioParam::Voice }

        # @!method initialize(format_:, voice:)
        # Some parameter documentation has been truncated; see
        # {OpenAI::Models::Chat::ChatCompletionAudioParam} for more details.
        #
        # Parameters for audio output. Required when audio output is requested with
        # `modalities: ["audio"]`.
        # [Learn more](https://platform.openai.com/docs/guides/audio).
        #
        # @param format_ [Symbol, OpenAI::Models::Chat::ChatCompletionAudioParam::Format] Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`,
        #
        # @param voice [String, Symbol, OpenAI::Models::Chat::ChatCompletionAudioParam::Voice] The voice the model uses to respond. Supported voices are

        # Specifies the output audio format. Must be one of `wav`, `mp3`, `flac`, `opus`,
        # or `pcm16`.
        #
        # @see OpenAI::Models::Chat::ChatCompletionAudioParam#format_
        module Format
          extend OpenAI::Internal::Type::Enum

          WAV = :wav
          AAC = :aac
          MP3 = :mp3
          FLAC = :flac
          OPUS = :opus
          PCM16 = :pcm16

          # @!method self.values
          # @return [Array<Symbol>]
        end

        # The voice the model uses to respond. Supported voices are `alloy`, `ash`,
        # `ballad`, `coral`, `echo`, `fable`, `nova`, `onyx`, `sage`, and `shimmer`.
        #
        # NOTE(review): the attribute doc above lists `fable`, `nova`, and `onyx`, but no
        # constants are defined for them below (while `verse` is defined yet undocumented).
        # The plain-String variant still accepts any value; confirm against the API spec.
        #
        # @see OpenAI::Models::Chat::ChatCompletionAudioParam#voice
        module Voice
          extend OpenAI::Internal::Type::Union

          variant String

          variant const: -> { OpenAI::Models::Chat::ChatCompletionAudioParam::Voice::ALLOY }

          variant const: -> { OpenAI::Models::Chat::ChatCompletionAudioParam::Voice::ASH }

          variant const: -> { OpenAI::Models::Chat::ChatCompletionAudioParam::Voice::BALLAD }

          variant const: -> { OpenAI::Models::Chat::ChatCompletionAudioParam::Voice::CORAL }

          variant const: -> { OpenAI::Models::Chat::ChatCompletionAudioParam::Voice::ECHO }

          variant const: -> { OpenAI::Models::Chat::ChatCompletionAudioParam::Voice::SAGE }

          variant const: -> { OpenAI::Models::Chat::ChatCompletionAudioParam::Voice::SHIMMER }

          variant const: -> { OpenAI::Models::Chat::ChatCompletionAudioParam::Voice::VERSE }

          # @!method self.variants
          # @return [Array(String, Symbol)]

          define_sorbet_constant!(:Variants) do
            T.type_alias { T.any(String, OpenAI::Chat::ChatCompletionAudioParam::Voice::TaggedSymbol) }
          end

          # @!group

          ALLOY = :alloy
          ASH = :ash
          BALLAD = :ballad
          CORAL = :coral
          ECHO = :echo
          SAGE = :sage
          SHIMMER = :shimmer
          VERSE = :verse

          # @!endgroup
        end
      end
    end

    ChatCompletionAudioParam = Chat::ChatCompletionAudioParam
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Chat
-
1
class ChatCompletionChunk < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
# A unique identifier for the chat completion. Each chunk has the same ID.
#
# @return [String]
required :id, String

# @!attribute choices
# A list of chat completion choices. Can contain more than one elements if `n` is
# greater than 1. Can also be empty for the last chunk if you set
# `stream_options: {"include_usage": true}`.
#
# @return [Array<OpenAI::Models::Chat::ChatCompletionChunk::Choice>]
required :choices, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletionChunk::Choice] }

# @!attribute created
# The Unix timestamp (in seconds) of when the chat completion was created. Each
# chunk has the same timestamp.
#
# @return [Integer]
required :created, Integer

# @!attribute model
# The model to generate the completion.
#
# @return [String]
required :model, String

# @!attribute object
# The object type, which is always `chat.completion.chunk`.
#
# @return [Symbol, :"chat.completion.chunk"]
required :object, const: :"chat.completion.chunk"

# @!attribute service_tier
# Specifies the processing type used for serving the request.
#
# - If set to 'auto', then the request will be processed with the service tier
#   configured in the Project settings. Unless otherwise configured, the Project
#   will use 'default'.
# - If set to 'default', then the request will be processed with the standard
#   pricing and performance for the selected model.
# - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
#   'priority', then the request will be processed with the corresponding service
#   tier. [Contact sales](https://openai.com/contact-sales) to learn more about
#   Priority processing.
# - When not set, the default behavior is 'auto'.
#
# When the `service_tier` parameter is set, the response body will include the
# `service_tier` value based on the processing mode actually used to serve the
# request. This response value may be different from the value set in the
# parameter.
#
# @return [Symbol, OpenAI::Models::Chat::ChatCompletionChunk::ServiceTier, nil]
optional :service_tier, enum: -> { OpenAI::Chat::ChatCompletionChunk::ServiceTier }, nil?: true

# @!attribute system_fingerprint
# This fingerprint represents the backend configuration that the model runs with.
# Can be used in conjunction with the `seed` request parameter to understand when
# backend changes have been made that might impact determinism.
#
# @return [String, nil]
optional :system_fingerprint, String

# @!attribute usage
# An optional field that will only be present when you set
# `stream_options: {"include_usage": true}` in your request. When present, it
# contains a null value **except for the last chunk** which contains the token
# usage statistics for the entire request.
#
# **NOTE:** If the stream is interrupted or cancelled, you may not receive the
# final usage chunk which contains the total token usage for the request.
#
# @return [OpenAI::Models::CompletionUsage, nil]
optional :usage, -> { OpenAI::CompletionUsage }, nil?: true

# @!method initialize(id:, choices:, created:, model:, service_tier: nil, system_fingerprint: nil, usage: nil, object: :"chat.completion.chunk")
# Some parameter documentation has been truncated; see
# {OpenAI::Models::Chat::ChatCompletionChunk} for more details.
#
# Represents a streamed chunk of a chat completion response returned by the model,
# based on the provided input.
# [Learn more](https://platform.openai.com/docs/guides/streaming-responses).
#
# @param id [String] A unique identifier for the chat completion. Each chunk has the same ID.
#
# @param choices [Array<OpenAI::Models::Chat::ChatCompletionChunk::Choice>] A list of chat completion choices. Can contain more than one elements if `n` is
#
# @param created [Integer] The Unix timestamp (in seconds) of when the chat completion was created. Each ch
#
# @param model [String] The model to generate the completion.
#
# @param service_tier [Symbol, OpenAI::Models::Chat::ChatCompletionChunk::ServiceTier, nil] Specifies the processing type used for serving the request.
#
# @param system_fingerprint [String] This fingerprint represents the backend configuration that the model runs with.
#
# @param usage [OpenAI::Models::CompletionUsage, nil] An optional field that will only be present when you set
#
# @param object [Symbol, :"chat.completion.chunk"] The object type, which is always `chat.completion.chunk`.
-
-
1
class Choice < OpenAI::Internal::Type::BaseModel
  # @!attribute delta
  # A chat completion delta generated by streamed model responses.
  #
  # @return [OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta]
  required :delta, -> { OpenAI::Chat::ChatCompletionChunk::Choice::Delta }

  # @!attribute finish_reason
  # The reason the model stopped generating tokens. This will be `stop` if the model
  # hit a natural stop point or a provided stop sequence, `length` if the maximum
  # number of tokens specified in the request was reached, `content_filter` if
  # content was omitted due to a flag from our content filters, `tool_calls` if the
  # model called a tool, or `function_call` (deprecated) if the model called a
  # function.
  #
  # @return [Symbol, OpenAI::Models::Chat::ChatCompletionChunk::Choice::FinishReason, nil]
  required :finish_reason,
           enum: -> {
             OpenAI::Chat::ChatCompletionChunk::Choice::FinishReason
           },
           nil?: true

  # @!attribute index
  # The index of the choice in the list of choices.
  #
  # @return [Integer]
  required :index, Integer

  # @!attribute logprobs
  # Log probability information for the choice.
  #
  # @return [OpenAI::Models::Chat::ChatCompletionChunk::Choice::Logprobs, nil]
  optional :logprobs, -> { OpenAI::Chat::ChatCompletionChunk::Choice::Logprobs }, nil?: true

  # @!method initialize(delta:, finish_reason:, index:, logprobs: nil)
  # Some parameter documentation has been truncated; see
  # {OpenAI::Models::Chat::ChatCompletionChunk::Choice} for more details.
  #
  # @param delta [OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta] A chat completion delta generated by streamed model responses.
  #
  # @param finish_reason [Symbol, OpenAI::Models::Chat::ChatCompletionChunk::Choice::FinishReason, nil] The reason the model stopped generating tokens. This will be `stop` if the model
  #
  # @param index [Integer] The index of the choice in the list of choices.
  #
  # @param logprobs [OpenAI::Models::Chat::ChatCompletionChunk::Choice::Logprobs, nil] Log probability information for the choice.

  # @see OpenAI::Models::Chat::ChatCompletionChunk::Choice#delta
  class Delta < OpenAI::Internal::Type::BaseModel
    # @!attribute content
    # The contents of the chunk message.
    #
    # @return [String, nil]
    optional :content, String, nil?: true

    # @!attribute function_call
    # @deprecated
    #
    # Deprecated and replaced by `tool_calls`. The name and arguments of a function
    # that should be called, as generated by the model.
    #
    # @return [OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::FunctionCall, nil]
    optional :function_call, -> { OpenAI::Chat::ChatCompletionChunk::Choice::Delta::FunctionCall }

    # @!attribute refusal
    # The refusal message generated by the model.
    #
    # @return [String, nil]
    optional :refusal, String, nil?: true

    # @!attribute role
    # The role of the author of this message.
    #
    # @return [Symbol, OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::Role, nil]
    optional :role, enum: -> { OpenAI::Chat::ChatCompletionChunk::Choice::Delta::Role }

    # @!attribute tool_calls
    #
    # @return [Array<OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall>, nil]
    optional :tool_calls,
             -> {
               OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletionChunk::Choice::Delta::ToolCall]
             }

    # @!method initialize(content: nil, function_call: nil, refusal: nil, role: nil, tool_calls: nil)
    # Some parameter documentation has been truncated; see
    # {OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta} for more details.
    #
    # A chat completion delta generated by streamed model responses.
    #
    # @param content [String, nil] The contents of the chunk message.
    #
    # @param function_call [OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::FunctionCall] Deprecated and replaced by `tool_calls`. The name and arguments of a function th
    #
    # @param refusal [String, nil] The refusal message generated by the model.
    #
    # @param role [Symbol, OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::Role] The role of the author of this message.
    #
    # @param tool_calls [Array<OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall>]

    # @deprecated
    #
    # @see OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta#function_call
    class FunctionCall < OpenAI::Internal::Type::BaseModel
      # @!attribute arguments
      # The arguments to call the function with, as generated by the model in JSON
      # format. Note that the model does not always generate valid JSON, and may
      # hallucinate parameters not defined by your function schema. Validate the
      # arguments in your code before calling your function.
      #
      # @return [String, nil]
      optional :arguments, String

      # @!attribute name
      # The name of the function to call.
      #
      # @return [String, nil]
      optional :name, String

      # @!method initialize(arguments: nil, name: nil)
      # Some parameter documentation has been truncated; see
      # {OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::FunctionCall} for
      # more details.
      #
      # Deprecated and replaced by `tool_calls`. The name and arguments of a function
      # that should be called, as generated by the model.
      #
      # @param arguments [String] The arguments to call the function with, as generated by the model in JSON forma
      #
      # @param name [String] The name of the function to call.
    end

    # The role of the author of this message.
    #
    # @see OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta#role
    module Role
      extend OpenAI::Internal::Type::Enum

      DEVELOPER = :developer
      SYSTEM = :system
      USER = :user
      ASSISTANT = :assistant
      TOOL = :tool

      # @!method self.values
      # @return [Array<Symbol>]
    end

    class ToolCall < OpenAI::Internal::Type::BaseModel
      # @!attribute index
      #
      # @return [Integer]
      required :index, Integer

      # @!attribute id
      # The ID of the tool call.
      #
      # @return [String, nil]
      optional :id, String

      # @!attribute function
      #
      # @return [OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Function, nil]
      optional :function, -> { OpenAI::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Function }

      # @!attribute type
      # The type of the tool. Currently, only `function` is supported.
      #
      # @return [Symbol, OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Type, nil]
      optional :type, enum: -> { OpenAI::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Type }

      # @!method initialize(index:, id: nil, function: nil, type: nil)
      # @param index [Integer]
      #
      # @param id [String] The ID of the tool call.
      #
      # @param function [OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Function]
      #
      # @param type [Symbol, OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Type] The type of the tool. Currently, only `function` is supported.

      # @see OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall#function
      class Function < OpenAI::Internal::Type::BaseModel
        # @!attribute arguments
        # The arguments to call the function with, as generated by the model in JSON
        # format. Note that the model does not always generate valid JSON, and may
        # hallucinate parameters not defined by your function schema. Validate the
        # arguments in your code before calling your function.
        #
        # @return [String, nil]
        optional :arguments, String

        # @!attribute name
        # The name of the function to call.
        #
        # @return [String, nil]
        optional :name, String

        # @!method initialize(arguments: nil, name: nil)
        # Some parameter documentation has been truncated; see
        # {OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall::Function}
        # for more details.
        #
        # @param arguments [String] The arguments to call the function with, as generated by the model in JSON forma
        #
        # @param name [String] The name of the function to call.
      end

      # The type of the tool. Currently, only `function` is supported.
      #
      # @see OpenAI::Models::Chat::ChatCompletionChunk::Choice::Delta::ToolCall#type
      module Type
        extend OpenAI::Internal::Type::Enum

        FUNCTION = :function

        # @!method self.values
        # @return [Array<Symbol>]
      end
    end
  end

  # The reason the model stopped generating tokens. This will be `stop` if the model
  # hit a natural stop point or a provided stop sequence, `length` if the maximum
  # number of tokens specified in the request was reached, `content_filter` if
  # content was omitted due to a flag from our content filters, `tool_calls` if the
  # model called a tool, or `function_call` (deprecated) if the model called a
  # function.
  #
  # @see OpenAI::Models::Chat::ChatCompletionChunk::Choice#finish_reason
  module FinishReason
    extend OpenAI::Internal::Type::Enum

    STOP = :stop
    LENGTH = :length
    TOOL_CALLS = :tool_calls
    CONTENT_FILTER = :content_filter
    FUNCTION_CALL = :function_call

    # @!method self.values
    # @return [Array<Symbol>]
  end

  # @see OpenAI::Models::Chat::ChatCompletionChunk::Choice#logprobs
  class Logprobs < OpenAI::Internal::Type::BaseModel
    # @!attribute content
    # A list of message content tokens with log probability information.
    #
    # @return [Array<OpenAI::Models::Chat::ChatCompletionTokenLogprob>, nil]
    required :content,
             -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletionTokenLogprob] },
             nil?: true

    # @!attribute refusal
    # A list of message refusal tokens with log probability information.
    #
    # @return [Array<OpenAI::Models::Chat::ChatCompletionTokenLogprob>, nil]
    required :refusal,
             -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletionTokenLogprob] },
             nil?: true

    # @!method initialize(content:, refusal:)
    # Log probability information for the choice.
    #
    # @param content [Array<OpenAI::Models::Chat::ChatCompletionTokenLogprob>, nil] A list of message content tokens with log probability information.
    #
    # @param refusal [Array<OpenAI::Models::Chat::ChatCompletionTokenLogprob>, nil] A list of message refusal tokens with log probability information.
  end
end
-
-
# Specifies the processing type used for serving the request.
-
#
-
# - If set to 'auto', then the request will be processed with the service tier
-
# configured in the Project settings. Unless otherwise configured, the Project
-
# will use 'default'.
-
# - If set to 'default', then the request will be processed with the standard
-
# pricing and performance for the selected model.
-
# - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
-
# 'priority', then the request will be processed with the corresponding service
-
# tier. [Contact sales](https://openai.com/contact-sales) to learn more about
-
# Priority processing.
-
# - When not set, the default behavior is 'auto'.
-
#
-
# When the `service_tier` parameter is set, the response body will include the
-
# `service_tier` value based on the processing mode actually used to serve the
-
# request. This response value may be different from the value set in the
-
# parameter.
-
#
-
# @see OpenAI::Models::Chat::ChatCompletionChunk#service_tier
-
1
module ServiceTier
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
AUTO = :auto
-
1
DEFAULT = :default
-
1
FLEX = :flex
-
1
SCALE = :scale
-
1
PRIORITY = :priority
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
-
1
ChatCompletionChunk = Chat::ChatCompletionChunk
-
end
-
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Chat
      # Union over the content-part types accepted in a chat message
      # (text, image, audio, file), discriminated by the `type` field.
      #
      # Learn about
      # [text inputs](https://platform.openai.com/docs/guides/text-generation).
      module ChatCompletionContentPart
        extend OpenAI::Internal::Type::Union

        discriminator :type

        # Learn about [text inputs](https://platform.openai.com/docs/guides/text-generation).
        variant :text, -> { OpenAI::Chat::ChatCompletionContentPartText }

        # Learn about [image inputs](https://platform.openai.com/docs/guides/vision).
        variant :image_url, -> { OpenAI::Chat::ChatCompletionContentPartImage }

        # Learn about [audio inputs](https://platform.openai.com/docs/guides/audio).
        variant :input_audio, -> { OpenAI::Chat::ChatCompletionContentPartInputAudio }

        # Learn about [file inputs](https://platform.openai.com/docs/guides/text) for text generation.
        variant :file, -> { OpenAI::Chat::ChatCompletionContentPart::File }

        class File < OpenAI::Internal::Type::BaseModel
          # @!attribute file
          #
          #   @return [OpenAI::Models::Chat::ChatCompletionContentPart::File::File]
          required :file, -> { OpenAI::Chat::ChatCompletionContentPart::File::File }

          # @!attribute type
          #   The type of the content part. Always `file`.
          #
          #   @return [Symbol, :file]
          required :type, const: :file

          # @!method initialize(file:, type: :file)
          #   Learn about [file inputs](https://platform.openai.com/docs/guides/text) for text
          #   generation.
          #
          #   @param file [OpenAI::Models::Chat::ChatCompletionContentPart::File::File]
          #
          #   @param type [Symbol, :file] The type of the content part. Always `file`.

          # Inner payload of the `file` content part: exactly one of `file_data`
          # (inline base64) or `file_id` (previously-uploaded file) is expected.
          #
          # @see OpenAI::Models::Chat::ChatCompletionContentPart::File#file
          class File < OpenAI::Internal::Type::BaseModel
            # @!attribute file_data
            #   The base64 encoded file data, used when passing the file to the model as a
            #   string.
            #
            #   @return [String, nil]
            optional :file_data, String

            # @!attribute file_id
            #   The ID of an uploaded file to use as input.
            #
            #   @return [String, nil]
            optional :file_id, String

            # @!attribute filename
            #   The name of the file, used when passing the file to the model as a string.
            #
            #   @return [String, nil]
            optional :filename, String

            # @!method initialize(file_data: nil, file_id: nil, filename: nil)
            #   Some parameter documentations has been truncated, see
            #   {OpenAI::Models::Chat::ChatCompletionContentPart::File::File} for more details.
            #
            #   @param file_data [String] The base64 encoded file data, used when passing the file to the model
            #
            #   @param file_id [String] The ID of an uploaded file to use as input.
            #
            #   @param filename [String] The name of the file, used when passing the file to the model as a
          end
        end

        # @!method self.variants
        #   @return [Array(OpenAI::Models::Chat::ChatCompletionContentPartText, OpenAI::Models::Chat::ChatCompletionContentPartImage, OpenAI::Models::Chat::ChatCompletionContentPartInputAudio, OpenAI::Models::Chat::ChatCompletionContentPart::File)]
      end
    end

    ChatCompletionContentPart = Chat::ChatCompletionContentPart
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Chat
      # Image content part of a chat message (`type: :image_url`).
      class ChatCompletionContentPartImage < OpenAI::Internal::Type::BaseModel
        # @!attribute image_url
        #
        #   @return [OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL]
        required :image_url, -> { OpenAI::Chat::ChatCompletionContentPartImage::ImageURL }

        # @!attribute type
        #   The type of the content part.
        #
        #   @return [Symbol, :image_url]
        required :type, const: :image_url

        # @!method initialize(image_url:, type: :image_url)
        #   Learn about [image inputs](https://platform.openai.com/docs/guides/vision).
        #
        #   @param image_url [OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL]
        #
        #   @param type [Symbol, :image_url] The type of the content part.

        # @see OpenAI::Models::Chat::ChatCompletionContentPartImage#image_url
        class ImageURL < OpenAI::Internal::Type::BaseModel
          # @!attribute url
          #   Either a URL of the image or the base64 encoded image data.
          #
          #   @return [String]
          required :url, String

          # @!attribute detail
          #   Specifies the detail level of the image. Learn more in the
          #   [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding).
          #
          #   @return [Symbol, OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL::Detail, nil]
          optional :detail, enum: -> { OpenAI::Chat::ChatCompletionContentPartImage::ImageURL::Detail }

          # @!method initialize(url:, detail: nil)
          #   Some parameter documentations has been truncated, see
          #   {OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL} for more
          #   details.
          #
          #   @param url [String] Either a URL of the image or the base64 encoded image data.
          #
          #   @param detail [Symbol, OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL::Detail] Specifies the detail level of the image. Learn more in the [Vision guide](https:

          # Specifies the detail level of the image. Learn more in the
          # [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding).
          #
          # @see OpenAI::Models::Chat::ChatCompletionContentPartImage::ImageURL#detail
          module Detail
            extend OpenAI::Internal::Type::Enum

            AUTO = :auto
            LOW = :low
            HIGH = :high

            # @!method self.values
            #   @return [Array<Symbol>]
          end
        end
      end
    end

    ChatCompletionContentPartImage = Chat::ChatCompletionContentPartImage
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Chat
      # Audio content part of a chat message (`type: :input_audio`).
      class ChatCompletionContentPartInputAudio < OpenAI::Internal::Type::BaseModel
        # @!attribute input_audio
        #
        #   @return [OpenAI::Models::Chat::ChatCompletionContentPartInputAudio::InputAudio]
        required :input_audio, -> { OpenAI::Chat::ChatCompletionContentPartInputAudio::InputAudio }

        # @!attribute type
        #   The type of the content part. Always `input_audio`.
        #
        #   @return [Symbol, :input_audio]
        required :type, const: :input_audio

        # @!method initialize(input_audio:, type: :input_audio)
        #   Learn about [audio inputs](https://platform.openai.com/docs/guides/audio).
        #
        #   @param input_audio [OpenAI::Models::Chat::ChatCompletionContentPartInputAudio::InputAudio]
        #
        #   @param type [Symbol, :input_audio] The type of the content part. Always `input_audio`.

        # @see OpenAI::Models::Chat::ChatCompletionContentPartInputAudio#input_audio
        class InputAudio < OpenAI::Internal::Type::BaseModel
          # @!attribute data
          #   Base64 encoded audio data.
          #
          #   @return [String]
          required :data, String

          # @!attribute format_
          #   The format of the encoded audio data. Currently supports "wav" and "mp3".
          #
          #   NOTE: the Ruby attribute is `format_` (trailing underscore) to avoid
          #   shadowing `Kernel#format`; it is serialized on the wire as `format`
          #   via `api_name:`.
          #
          #   @return [Symbol, OpenAI::Models::Chat::ChatCompletionContentPartInputAudio::InputAudio::Format]
          required :format_,
                   enum: -> { OpenAI::Chat::ChatCompletionContentPartInputAudio::InputAudio::Format },
                   api_name: :format

          # @!method initialize(data:, format_:)
          #   Some parameter documentations has been truncated, see
          #   {OpenAI::Models::Chat::ChatCompletionContentPartInputAudio::InputAudio} for more
          #   details.
          #
          #   @param data [String] Base64 encoded audio data.
          #
          #   @param format_ [Symbol, OpenAI::Models::Chat::ChatCompletionContentPartInputAudio::InputAudio::Format] The format of the encoded audio data. Currently supports "wav" and "mp3".

          # The format of the encoded audio data. Currently supports "wav" and "mp3".
          #
          # @see OpenAI::Models::Chat::ChatCompletionContentPartInputAudio::InputAudio#format_
          module Format
            extend OpenAI::Internal::Type::Enum

            WAV = :wav
            MP3 = :mp3

            # @!method self.values
            #   @return [Array<Symbol>]
          end
        end
      end
    end

    ChatCompletionContentPartInputAudio = Chat::ChatCompletionContentPartInputAudio
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Chat
      # Refusal content part of an assistant message (`type: :refusal`).
      class ChatCompletionContentPartRefusal < OpenAI::Internal::Type::BaseModel
        # @!attribute refusal
        #   The refusal message generated by the model.
        #
        #   @return [String]
        required :refusal, String

        # @!attribute type
        #   The type of the content part.
        #
        #   @return [Symbol, :refusal]
        required :type, const: :refusal

        # @!method initialize(refusal:, type: :refusal)
        #   @param refusal [String] The refusal message generated by the model.
        #
        #   @param type [Symbol, :refusal] The type of the content part.
      end
    end

    ChatCompletionContentPartRefusal = Chat::ChatCompletionContentPartRefusal
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Chat
      # Text content part of a chat message (`type: :text`).
      class ChatCompletionContentPartText < OpenAI::Internal::Type::BaseModel
        # @!attribute text
        #   The text content.
        #
        #   @return [String]
        required :text, String

        # @!attribute type
        #   The type of the content part.
        #
        #   @return [Symbol, :text]
        required :type, const: :text

        # @!method initialize(text:, type: :text)
        #   Learn about
        #   [text inputs](https://platform.openai.com/docs/guides/text-generation).
        #
        #   @param text [String] The text content.
        #
        #   @param type [Symbol, :text] The type of the content part.
      end
    end

    ChatCompletionContentPartText = Chat::ChatCompletionContentPartText
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Chat
      # Response object returned when a stored chat completion is deleted.
      #
      # @see OpenAI::Resources::Chat::Completions#delete
      class ChatCompletionDeleted < OpenAI::Internal::Type::BaseModel
        # @!attribute id
        #   The ID of the chat completion that was deleted.
        #
        #   @return [String]
        required :id, String

        # @!attribute deleted
        #   Whether the chat completion was deleted.
        #
        #   @return [Boolean]
        required :deleted, OpenAI::Internal::Type::Boolean

        # @!attribute object
        #   The type of object being deleted.
        #
        #   @return [Symbol, :"chat.completion.deleted"]
        required :object, const: :"chat.completion.deleted"

        # @!method initialize(id:, deleted:, object: :"chat.completion.deleted")
        #   @param id [String] The ID of the chat completion that was deleted.
        #
        #   @param deleted [Boolean] Whether the chat completion was deleted.
        #
        #   @param object [Symbol, :"chat.completion.deleted"] The type of object being deleted.
      end
    end

    ChatCompletionDeleted = Chat::ChatCompletionDeleted
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Chat
      # A `developer` role message parameter (replaces `system` for o1 and newer).
      class ChatCompletionDeveloperMessageParam < OpenAI::Internal::Type::BaseModel
        # @!attribute content
        #   The contents of the developer message.
        #
        #   @return [String, Array<OpenAI::Models::Chat::ChatCompletionContentPartText>]
        required :content, union: -> { OpenAI::Chat::ChatCompletionDeveloperMessageParam::Content }

        # @!attribute role
        #   The role of the messages author, in this case `developer`.
        #
        #   @return [Symbol, :developer]
        required :role, const: :developer

        # @!attribute name
        #   An optional name for the participant. Provides the model information to
        #   differentiate between participants of the same role.
        #
        #   @return [String, nil]
        optional :name, String

        # @!method initialize(content:, name: nil, role: :developer)
        #   Some parameter documentations has been truncated, see
        #   {OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam} for more details.
        #
        #   Developer-provided instructions that the model should follow, regardless of
        #   messages sent by the user. With o1 models and newer, `developer` messages
        #   replace the previous `system` messages.
        #
        #   @param content [String, Array<OpenAI::Models::Chat::ChatCompletionContentPartText>] The contents of the developer message.
        #
        #   @param name [String] An optional name for the participant. Provides the model information to differen
        #
        #   @param role [Symbol, :developer] The role of the messages author, in this case `developer`.

        # The contents of the developer message.
        #
        # @see OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam#content
        module Content
          extend OpenAI::Internal::Type::Union

          # The contents of the developer message.
          variant String

          # An array of content parts with a defined type. For developer messages, only type `text` is supported.
          variant -> { OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam::Content::ChatCompletionContentPartTextArray }

          # @!method self.variants
          #   @return [Array(String, Array<OpenAI::Models::Chat::ChatCompletionContentPartText>)]

          # @type [OpenAI::Internal::Type::Converter]
          ChatCompletionContentPartTextArray =
            OpenAI::Internal::Type::ArrayOf[-> { OpenAI::Chat::ChatCompletionContentPartText }]
        end
      end
    end

    ChatCompletionDeveloperMessageParam = Chat::ChatCompletionDeveloperMessageParam
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Chat
      # Forces the model to call one specific function by name.
      class ChatCompletionFunctionCallOption < OpenAI::Internal::Type::BaseModel
        # @!attribute name
        #   The name of the function to call.
        #
        #   @return [String]
        required :name, String

        # @!method initialize(name:)
        #   Specifying a particular function via `{"name": "my_function"}` forces the model
        #   to call that function.
        #
        #   @param name [String] The name of the function to call.
      end
    end

    ChatCompletionFunctionCallOption = Chat::ChatCompletionFunctionCallOption
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Chat
      # Legacy `function` role message parameter (superseded by `tool` messages).
      #
      # @deprecated
      class ChatCompletionFunctionMessageParam < OpenAI::Internal::Type::BaseModel
        # @!attribute content
        #   The contents of the function message.
        #
        #   @return [String, nil]
        required :content, String, nil?: true

        # @!attribute name
        #   The name of the function to call.
        #
        #   @return [String]
        required :name, String

        # @!attribute role
        #   The role of the messages author, in this case `function`.
        #
        #   @return [Symbol, :function]
        required :role, const: :function

        # @!method initialize(content:, name:, role: :function)
        #   @param content [String, nil] The contents of the function message.
        #
        #   @param name [String] The name of the function to call.
        #
        #   @param role [Symbol, :function] The role of the messages author, in this case `function`.
      end
    end

    ChatCompletionFunctionMessageParam = Chat::ChatCompletionFunctionMessageParam
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Chat
      # A chat completion message generated by the model (assistant role).
      class ChatCompletionMessage < OpenAI::Internal::Type::BaseModel
        # @!attribute content
        #   The contents of the message.
        #
        #   @return [String, nil]
        required :content, String, nil?: true

        # @!attribute parsed
        #   The parsed contents of the message, if JSON schema is specified.
        #
        #   NOTE: SDK-side convenience field (structured outputs), not part of the
        #   raw API response schema.
        #
        #   @return [Object, nil]
        optional :parsed, OpenAI::StructuredOutput::ParsedJson

        # @!attribute refusal
        #   The refusal message generated by the model.
        #
        #   @return [String, nil]
        required :refusal, String, nil?: true

        # @!attribute role
        #   The role of the author of this message.
        #
        #   @return [Symbol, :assistant]
        required :role, const: :assistant

        # @!attribute annotations
        #   Annotations for the message, when applicable, as when using the
        #   [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
        #
        #   @return [Array<OpenAI::Models::Chat::ChatCompletionMessage::Annotation>, nil]
        optional :annotations,
                 -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletionMessage::Annotation] }

        # @!attribute audio
        #   If the audio output modality is requested, this object contains data about the
        #   audio response from the model.
        #   [Learn more](https://platform.openai.com/docs/guides/audio).
        #
        #   @return [OpenAI::Models::Chat::ChatCompletionAudio, nil]
        optional :audio, -> { OpenAI::Chat::ChatCompletionAudio }, nil?: true

        # @!attribute function_call
        #   @deprecated
        #
        #   Deprecated and replaced by `tool_calls`. The name and arguments of a function
        #   that should be called, as generated by the model.
        #
        #   @return [OpenAI::Models::Chat::ChatCompletionMessage::FunctionCall, nil]
        optional :function_call, -> { OpenAI::Chat::ChatCompletionMessage::FunctionCall }

        # @!attribute tool_calls
        #   The tool calls generated by the model, such as function calls.
        #
        #   @return [Array<OpenAI::Models::Chat::ChatCompletionMessageToolCall>, nil]
        optional :tool_calls,
                 -> {
                   OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletionMessageToolCall]
                 }

        # @!method initialize(content:, refusal:, annotations: nil, audio: nil, function_call: nil, tool_calls: nil, role: :assistant)
        #   Some parameter documentations has been truncated, see
        #   {OpenAI::Models::Chat::ChatCompletionMessage} for more details.
        #
        #   A chat completion message generated by the model.
        #
        #   @param content [String, nil] The contents of the message.
        #
        #   @param refusal [String, nil] The refusal message generated by the model.
        #
        #   @param annotations [Array<OpenAI::Models::Chat::ChatCompletionMessage::Annotation>] Annotations for the message, when applicable, as when using the
        #
        #   @param audio [OpenAI::Models::Chat::ChatCompletionAudio, nil] If the audio output modality is requested, this object contains data
        #
        #   @param function_call [OpenAI::Models::Chat::ChatCompletionMessage::FunctionCall] Deprecated and replaced by `tool_calls`. The name and arguments of a function th
        #
        #   @param tool_calls [Array<OpenAI::Models::Chat::ChatCompletionMessageToolCall>] The tool calls generated by the model, such as function calls.
        #
        #   @param role [Symbol, :assistant] The role of the author of this message.

        class Annotation < OpenAI::Internal::Type::BaseModel
          # @!attribute type
          #   The type of the URL citation. Always `url_citation`.
          #
          #   @return [Symbol, :url_citation]
          required :type, const: :url_citation

          # @!attribute url_citation
          #   A URL citation when using web search.
          #
          #   @return [OpenAI::Models::Chat::ChatCompletionMessage::Annotation::URLCitation]
          required :url_citation, -> { OpenAI::Chat::ChatCompletionMessage::Annotation::URLCitation }

          # @!method initialize(url_citation:, type: :url_citation)
          #   A URL citation when using web search.
          #
          #   @param url_citation [OpenAI::Models::Chat::ChatCompletionMessage::Annotation::URLCitation] A URL citation when using web search.
          #
          #   @param type [Symbol, :url_citation] The type of the URL citation. Always `url_citation`.

          # @see OpenAI::Models::Chat::ChatCompletionMessage::Annotation#url_citation
          class URLCitation < OpenAI::Internal::Type::BaseModel
            # @!attribute end_index
            #   The index of the last character of the URL citation in the message.
            #
            #   @return [Integer]
            required :end_index, Integer

            # @!attribute start_index
            #   The index of the first character of the URL citation in the message.
            #
            #   @return [Integer]
            required :start_index, Integer

            # @!attribute title
            #   The title of the web resource.
            #
            #   @return [String]
            required :title, String

            # @!attribute url
            #   The URL of the web resource.
            #
            #   @return [String]
            required :url, String

            # @!method initialize(end_index:, start_index:, title:, url:)
            #   A URL citation when using web search.
            #
            #   @param end_index [Integer] The index of the last character of the URL citation in the message.
            #
            #   @param start_index [Integer] The index of the first character of the URL citation in the message.
            #
            #   @param title [String] The title of the web resource.
            #
            #   @param url [String] The URL of the web resource.
          end
        end

        # @deprecated
        #
        # @see OpenAI::Models::Chat::ChatCompletionMessage#function_call
        class FunctionCall < OpenAI::Internal::Type::BaseModel
          # @!attribute arguments
          #   The arguments to call the function with, as generated by the model in JSON
          #   format. Note that the model does not always generate valid JSON, and may
          #   hallucinate parameters not defined by your function schema. Validate the
          #   arguments in your code before calling your function.
          #
          #   @return [String]
          required :arguments, String

          # @!attribute name
          #   The name of the function to call.
          #
          #   @return [String]
          required :name, String

          # @!method initialize(arguments:, name:)
          #   Some parameter documentations has been truncated, see
          #   {OpenAI::Models::Chat::ChatCompletionMessage::FunctionCall} for more details.
          #
          #   Deprecated and replaced by `tool_calls`. The name and arguments of a function
          #   that should be called, as generated by the model.
          #
          #   @param arguments [String] The arguments to call the function with, as generated by the model in JSON forma
          #
          #   @param name [String] The name of the function to call.
        end
      end
    end

    ChatCompletionMessage = Chat::ChatCompletionMessage
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Chat
      # Union over the message-parameter types accepted in a chat completion
      # request, discriminated by the `role` field.
      #
      # Developer-provided instructions that the model should follow, regardless of
      # messages sent by the user. With o1 models and newer, `developer` messages
      # replace the previous `system` messages.
      module ChatCompletionMessageParam
        extend OpenAI::Internal::Type::Union

        discriminator :role

        # Developer-provided instructions that the model should follow, regardless of
        # messages sent by the user. With o1 models and newer, `developer` messages
        # replace the previous `system` messages.
        variant :developer, -> { OpenAI::Chat::ChatCompletionDeveloperMessageParam }

        # Developer-provided instructions that the model should follow, regardless of
        # messages sent by the user. With o1 models and newer, use `developer` messages
        # for this purpose instead.
        variant :system, -> { OpenAI::Chat::ChatCompletionSystemMessageParam }

        # Messages sent by an end user, containing prompts or additional context
        # information.
        variant :user, -> { OpenAI::Chat::ChatCompletionUserMessageParam }

        # Messages sent by the model in response to user messages.
        variant :assistant, -> { OpenAI::Chat::ChatCompletionAssistantMessageParam }

        variant :tool, -> { OpenAI::Chat::ChatCompletionToolMessageParam }

        variant :function, -> { OpenAI::Chat::ChatCompletionFunctionMessageParam }

        # @!method self.variants
        #   @return [Array(OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam, OpenAI::Models::Chat::ChatCompletionSystemMessageParam, OpenAI::Models::Chat::ChatCompletionUserMessageParam, OpenAI::Models::Chat::ChatCompletionAssistantMessageParam, OpenAI::Models::Chat::ChatCompletionToolMessageParam, OpenAI::Models::Chat::ChatCompletionFunctionMessageParam)]
      end
    end

    ChatCompletionMessageParam = Chat::ChatCompletionMessageParam
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Chat
      # A tool call generated by the model as part of an assistant message.
      class ChatCompletionMessageToolCall < OpenAI::Internal::Type::BaseModel
        # @!attribute id
        #   The ID of the tool call.
        #
        #   @return [String]
        required :id, String

        # @!attribute function
        #   The function that the model called.
        #
        #   @return [OpenAI::Models::Chat::ChatCompletionMessageToolCall::Function]
        required :function, -> { OpenAI::Chat::ChatCompletionMessageToolCall::Function }

        # @!attribute type
        #   The type of the tool. Currently, only `function` is supported.
        #
        #   @return [Symbol, :function]
        required :type, const: :function

        # @!method initialize(id:, function:, type: :function)
        #   @param id [String] The ID of the tool call.
        #
        #   @param function [OpenAI::Models::Chat::ChatCompletionMessageToolCall::Function] The function that the model called.
        #
        #   @param type [Symbol, :function] The type of the tool. Currently, only `function` is supported.

        # @see OpenAI::Models::Chat::ChatCompletionMessageToolCall#function
        class Function < OpenAI::Internal::Type::BaseModel
          # @!attribute arguments
          #   The arguments to call the function with, as generated by the model in JSON
          #   format. Note that the model does not always generate valid JSON, and may
          #   hallucinate parameters not defined by your function schema. Validate the
          #   arguments in your code before calling your function.
          #
          #   @return [String]
          required :arguments, String

          # @!attribute parsed
          #   The parsed contents of the arguments.
          #
          #   NOTE: SDK-side convenience field (structured outputs), not part of the
          #   raw API response schema.
          #
          #   @return [Object, nil]
          required :parsed, OpenAI::StructuredOutput::ParsedJson

          # @!attribute name
          #   The name of the function to call.
          #
          #   @return [String]
          required :name, String

          # @!method initialize(arguments:, name:)
          #   Some parameter documentations has been truncated, see
          #   {OpenAI::Models::Chat::ChatCompletionMessageToolCall::Function} for more
          #   details.
          #
          #   The function that the model called.
          #
          #   @param arguments [String] The arguments to call the function with, as generated by the model in JSON forma
          #
          #   @param name [String] The name of the function to call.
        end
      end
    end

    ChatCompletionMessageToolCall = Chat::ChatCompletionMessageToolCall
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Chat
      # Output modalities the model may generate (`text` and/or `audio`).
      module ChatCompletionModality
        extend OpenAI::Internal::Type::Enum

        TEXT = :text
        AUDIO = :audio

        # @!method self.values
        #   @return [Array<Symbol>]
      end
    end

    ChatCompletionModality = Chat::ChatCompletionModality
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Chat
      # Tool-choice parameter that forces the model to call one named function.
      class ChatCompletionNamedToolChoice < OpenAI::Internal::Type::BaseModel
        # @!attribute function
        #
        #   @return [OpenAI::Models::Chat::ChatCompletionNamedToolChoice::Function]
        required :function, -> { OpenAI::Chat::ChatCompletionNamedToolChoice::Function }

        # @!attribute type
        #   The type of the tool. Currently, only `function` is supported.
        #
        #   @return [Symbol, :function]
        required :type, const: :function

        # @!method initialize(function:, type: :function)
        #   Specifies a tool the model should use. Use to force the model to call a specific
        #   function.
        #
        #   @param function [OpenAI::Models::Chat::ChatCompletionNamedToolChoice::Function]
        #
        #   @param type [Symbol, :function] The type of the tool. Currently, only `function` is supported.

        # @see OpenAI::Models::Chat::ChatCompletionNamedToolChoice#function
        class Function < OpenAI::Internal::Type::BaseModel
          # @!attribute name
          #   The name of the function to call.
          #
          #   @return [String]
          required :name, String

          # @!method initialize(name:)
          #   @param name [String] The name of the function to call.
        end
      end
    end

    ChatCompletionNamedToolChoice = Chat::ChatCompletionNamedToolChoice
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Chat
      # Static predicted-output content used to speed up responses whose text is
      # largely known in advance (e.g. regenerating a file with small edits).
      class ChatCompletionPredictionContent < OpenAI::Internal::Type::BaseModel
        # @!attribute content
        #   The content that should be matched when generating a model response. If
        #   generated tokens would match this content, the entire model response can be
        #   returned much more quickly.
        #
        #   @return [String, Array<OpenAI::Models::Chat::ChatCompletionContentPartText>]
        required :content, union: -> { OpenAI::Chat::ChatCompletionPredictionContent::Content }

        # @!attribute type
        #   The type of the predicted content you want to provide. This type is currently
        #   always `content`.
        #
        #   @return [Symbol, :content]
        required :type, const: :content

        # @!method initialize(content:, type: :content)
        #   Some parameter documentations has been truncated, see
        #   {OpenAI::Models::Chat::ChatCompletionPredictionContent} for more details.
        #
        #   Static predicted output content, such as the content of a text file that is
        #   being regenerated.
        #
        #   @param content [String, Array<OpenAI::Models::Chat::ChatCompletionContentPartText>] The content that should be matched when generating a model response.
        #
        #   @param type [Symbol, :content] The type of the predicted content you want to provide. This type is

        # The content that should be matched when generating a model response. If
        # generated tokens would match this content, the entire model response can be
        # returned much more quickly.
        #
        # @see OpenAI::Models::Chat::ChatCompletionPredictionContent#content
        module Content
          extend OpenAI::Internal::Type::Union

          # The content used for a Predicted Output. This is often the
          # text of a file you are regenerating with minor changes.
          variant String

          # An array of content parts with a defined type. Supported options differ based on the [model](https://platform.openai.com/docs/models) being used to generate the response. Can contain text inputs.
          variant -> { OpenAI::Models::Chat::ChatCompletionPredictionContent::Content::ChatCompletionContentPartTextArray }

          # @!method self.variants
          #   @return [Array(String, Array<OpenAI::Models::Chat::ChatCompletionContentPartText>)]

          # @type [OpenAI::Internal::Type::Converter]
          ChatCompletionContentPartTextArray =
            OpenAI::Internal::Type::ArrayOf[-> { OpenAI::Chat::ChatCompletionContentPartText }]
        end
      end
    end

    ChatCompletionPredictionContent = Chat::ChatCompletionPredictionContent
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Chat
      # Back-compat alias: chat-scoped reasoning-effort enum now lives at
      # {OpenAI::Models::ReasoningEffort}.
      ChatCompletionReasoningEffort = OpenAI::Models::ReasoningEffort
    end

    ChatCompletionReasoningEffort = Chat::ChatCompletionReasoningEffort
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Chat
      # The role of the author of a message
      module ChatCompletionRole
        extend OpenAI::Internal::Type::Enum

        DEVELOPER = :developer
        SYSTEM = :system
        USER = :user
        ASSISTANT = :assistant
        TOOL = :tool
        FUNCTION = :function

        # @!method self.values
        #   @return [Array<Symbol>]
      end
    end

    ChatCompletionRole = Chat::ChatCompletionRole
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Chat
-
1
class ChatCompletionStoreMessage < OpenAI::Models::Chat::ChatCompletionMessage
-
# @!attribute id
-
# The identifier of the chat message.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute content_parts
-
# If a content parts array was provided, this is an array of `text` and
-
# `image_url` parts. Otherwise, null.
-
#
-
# @return [Array<OpenAI::Models::Chat::ChatCompletionContentPartText, OpenAI::Models::Chat::ChatCompletionContentPartImage>, nil]
-
1
optional :content_parts,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[union: OpenAI::Chat::ChatCompletionStoreMessage::ContentPart]
-
},
-
nil?: true
-
-
# @!method initialize(id:, content_parts: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Chat::ChatCompletionStoreMessage} for more details.
-
#
-
# A chat completion message generated by the model.
-
#
-
# @param id [String] The identifier of the chat message.
-
#
-
# @param content_parts [Array<OpenAI::Models::Chat::ChatCompletionContentPartText, OpenAI::Models::Chat::ChatCompletionContentPartImage>, nil] If a content parts array was provided, this is an array of `text` and `image_url
-
-
# Learn about
-
# [text inputs](https://platform.openai.com/docs/guides/text-generation).
-
1
module ContentPart
-
1
extend OpenAI::Internal::Type::Union
-
-
# Learn about [text inputs](https://platform.openai.com/docs/guides/text-generation).
-
1
variant -> { OpenAI::Chat::ChatCompletionContentPartText }
-
-
# Learn about [image inputs](https://platform.openai.com/docs/guides/vision).
-
1
variant -> { OpenAI::Chat::ChatCompletionContentPartImage }
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Chat::ChatCompletionContentPartText, OpenAI::Models::Chat::ChatCompletionContentPartImage)]
-
end
-
end
-
end
-
-
1
ChatCompletionStoreMessage = Chat::ChatCompletionStoreMessage
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Chat
-
1
class ChatCompletionStreamOptions < OpenAI::Internal::Type::BaseModel
-
# @!attribute include_usage
-
# If set, an additional chunk will be streamed before the `data: [DONE]` message.
-
# The `usage` field on this chunk shows the token usage statistics for the entire
-
# request, and the `choices` field will always be an empty array.
-
#
-
# All other chunks will also include a `usage` field, but with a null value.
-
# **NOTE:** If the stream is interrupted, you may not receive the final usage
-
# chunk which contains the total token usage for the request.
-
#
-
# @return [Boolean, nil]
-
1
optional :include_usage, OpenAI::Internal::Type::Boolean
-
-
# @!method initialize(include_usage: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Chat::ChatCompletionStreamOptions} for more details.
-
#
-
# Options for streaming response. Only set this when you set `stream: true`.
-
#
-
# @param include_usage [Boolean] If set, an additional chunk will be streamed before the `data: [DONE]`
-
end
-
end
-
-
1
ChatCompletionStreamOptions = Chat::ChatCompletionStreamOptions
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Chat
-
1
class ChatCompletionSystemMessageParam < OpenAI::Internal::Type::BaseModel
-
# @!attribute content
-
# The contents of the system message.
-
#
-
# @return [String, Array<OpenAI::Models::Chat::ChatCompletionContentPartText>]
-
1
required :content, union: -> { OpenAI::Chat::ChatCompletionSystemMessageParam::Content }
-
-
# @!attribute role
-
# The role of the messages author, in this case `system`.
-
#
-
# @return [Symbol, :system]
-
1
required :role, const: :system
-
-
# @!attribute name
-
# An optional name for the participant. Provides the model information to
-
# differentiate between participants of the same role.
-
#
-
# @return [String, nil]
-
1
optional :name, String
-
-
# @!method initialize(content:, name: nil, role: :system)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Chat::ChatCompletionSystemMessageParam} for more details.
-
#
-
# Developer-provided instructions that the model should follow, regardless of
-
# messages sent by the user. With o1 models and newer, use `developer` messages
-
# for this purpose instead.
-
#
-
# @param content [String, Array<OpenAI::Models::Chat::ChatCompletionContentPartText>] The contents of the system message.
-
#
-
# @param name [String] An optional name for the participant. Provides the model information to differen
-
#
-
# @param role [Symbol, :system] The role of the messages author, in this case `system`.
-
-
# The contents of the system message.
-
#
-
# @see OpenAI::Models::Chat::ChatCompletionSystemMessageParam#content
-
1
module Content
-
1
extend OpenAI::Internal::Type::Union
-
-
# The contents of the system message.
-
1
variant String
-
-
# An array of content parts with a defined type. For system messages, only type `text` is supported.
-
1
variant -> { OpenAI::Models::Chat::ChatCompletionSystemMessageParam::Content::ChatCompletionContentPartTextArray }
-
-
# @!method self.variants
-
# @return [Array(String, Array<OpenAI::Models::Chat::ChatCompletionContentPartText>)]
-
-
# @type [OpenAI::Internal::Type::Converter]
-
ChatCompletionContentPartTextArray =
-
1
OpenAI::Internal::Type::ArrayOf[-> { OpenAI::Chat::ChatCompletionContentPartText }]
-
end
-
end
-
end
-
-
1
ChatCompletionSystemMessageParam = Chat::ChatCompletionSystemMessageParam
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Chat
-
1
class ChatCompletionTokenLogprob < OpenAI::Internal::Type::BaseModel
-
# @!attribute token
-
# The token.
-
#
-
# @return [String]
-
1
required :token, String
-
-
# @!attribute bytes
-
# A list of integers representing the UTF-8 bytes representation of the token.
-
# Useful in instances where characters are represented by multiple tokens and
-
# their byte representations must be combined to generate the correct text
-
# representation. Can be `null` if there is no bytes representation for the token.
-
#
-
# @return [Array<Integer>, nil]
-
1
required :bytes, OpenAI::Internal::Type::ArrayOf[Integer], nil?: true
-
-
# @!attribute logprob
-
# The log probability of this token, if it is within the top 20 most likely
-
# tokens. Otherwise, the value `-9999.0` is used to signify that the token is very
-
# unlikely.
-
#
-
# @return [Float]
-
1
required :logprob, Float
-
-
# @!attribute top_logprobs
-
# List of the most likely tokens and their log probability, at this token
-
# position. In rare cases, there may be fewer than the number of requested
-
# `top_logprobs` returned.
-
#
-
# @return [Array<OpenAI::Models::Chat::ChatCompletionTokenLogprob::TopLogprob>]
-
1
required :top_logprobs,
-
-> { OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletionTokenLogprob::TopLogprob] }
-
-
# @!method initialize(token:, bytes:, logprob:, top_logprobs:)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Chat::ChatCompletionTokenLogprob} for more details.
-
#
-
# @param token [String] The token.
-
#
-
# @param bytes [Array<Integer>, nil] A list of integers representing the UTF-8 bytes representation of the token. Use
-
#
-
# @param logprob [Float] The log probability of this token, if it is within the top 20 most likely tokens
-
#
-
# @param top_logprobs [Array<OpenAI::Models::Chat::ChatCompletionTokenLogprob::TopLogprob>] List of the most likely tokens and their log probability, at this token position
-
-
1
class TopLogprob < OpenAI::Internal::Type::BaseModel
-
# @!attribute token
-
# The token.
-
#
-
# @return [String]
-
1
required :token, String
-
-
# @!attribute bytes
-
# A list of integers representing the UTF-8 bytes representation of the token.
-
# Useful in instances where characters are represented by multiple tokens and
-
# their byte representations must be combined to generate the correct text
-
# representation. Can be `null` if there is no bytes representation for the token.
-
#
-
# @return [Array<Integer>, nil]
-
1
required :bytes, OpenAI::Internal::Type::ArrayOf[Integer], nil?: true
-
-
# @!attribute logprob
-
# The log probability of this token, if it is within the top 20 most likely
-
# tokens. Otherwise, the value `-9999.0` is used to signify that the token is very
-
# unlikely.
-
#
-
# @return [Float]
-
1
required :logprob, Float
-
-
# @!method initialize(token:, bytes:, logprob:)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Chat::ChatCompletionTokenLogprob::TopLogprob} for more details.
-
#
-
# @param token [String] The token.
-
#
-
# @param bytes [Array<Integer>, nil] A list of integers representing the UTF-8 bytes representation of the token. Use
-
#
-
# @param logprob [Float] The log probability of this token, if it is within the top 20 most likely tokens
-
end
-
end
-
end
-
-
1
ChatCompletionTokenLogprob = Chat::ChatCompletionTokenLogprob
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Chat
-
1
class ChatCompletionTool < OpenAI::Internal::Type::BaseModel
-
# @!attribute function
-
#
-
# @return [OpenAI::Models::FunctionDefinition]
-
1
required :function, -> { OpenAI::FunctionDefinition }
-
-
# @!attribute type
-
# The type of the tool. Currently, only `function` is supported.
-
#
-
# @return [Symbol, :function]
-
1
required :type, const: :function
-
-
# @!method initialize(function:, type: :function)
-
# @param function [OpenAI::Models::FunctionDefinition]
-
#
-
# @param type [Symbol, :function] The type of the tool. Currently, only `function` is supported.
-
end
-
end
-
-
1
ChatCompletionTool = Chat::ChatCompletionTool
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Chat
-
# Controls which (if any) tool is called by the model. `none` means the model will
-
# not call any tool and instead generates a message. `auto` means the model can
-
# pick between generating a message or calling one or more tools. `required` means
-
# the model must call one or more tools. Specifying a particular tool via
-
# `{"type": "function", "function": {"name": "my_function"}}` forces the model to
-
# call that tool.
-
#
-
# `none` is the default when no tools are present. `auto` is the default if tools
-
# are present.
-
1
module ChatCompletionToolChoiceOption
-
1
extend OpenAI::Internal::Type::Union
-
-
# `none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools.
-
1
variant enum: -> { OpenAI::Chat::ChatCompletionToolChoiceOption::Auto }
-
-
# Specifies a tool the model should use. Use to force the model to call a specific function.
-
1
variant -> { OpenAI::Chat::ChatCompletionNamedToolChoice }
-
-
# `none` means the model will not call any tool and instead generates a message.
-
# `auto` means the model can pick between generating a message or calling one or
-
# more tools. `required` means the model must call one or more tools.
-
1
module Auto
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
NONE = :none
-
1
AUTO = :auto
-
1
REQUIRED = :required
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
# @!method self.variants
-
# @return [Array(Symbol, OpenAI::Models::Chat::ChatCompletionToolChoiceOption::Auto, OpenAI::Models::Chat::ChatCompletionNamedToolChoice)]
-
end
-
end
-
-
1
ChatCompletionToolChoiceOption = Chat::ChatCompletionToolChoiceOption
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Chat
-
1
class ChatCompletionToolMessageParam < OpenAI::Internal::Type::BaseModel
-
# @!attribute content
-
# The contents of the tool message.
-
#
-
# @return [String, Array<OpenAI::Models::Chat::ChatCompletionContentPartText>]
-
1
required :content, union: -> { OpenAI::Chat::ChatCompletionToolMessageParam::Content }
-
-
# @!attribute role
-
# The role of the messages author, in this case `tool`.
-
#
-
# @return [Symbol, :tool]
-
1
required :role, const: :tool
-
-
# @!attribute tool_call_id
-
# Tool call that this message is responding to.
-
#
-
# @return [String]
-
1
required :tool_call_id, String
-
-
# @!method initialize(content:, tool_call_id:, role: :tool)
-
# @param content [String, Array<OpenAI::Models::Chat::ChatCompletionContentPartText>] The contents of the tool message.
-
#
-
# @param tool_call_id [String] Tool call that this message is responding to.
-
#
-
# @param role [Symbol, :tool] The role of the messages author, in this case `tool`.
-
-
# The contents of the tool message.
-
#
-
# @see OpenAI::Models::Chat::ChatCompletionToolMessageParam#content
-
1
module Content
-
1
extend OpenAI::Internal::Type::Union
-
-
# The contents of the tool message.
-
1
variant String
-
-
# An array of content parts with a defined type. For tool messages, only type `text` is supported.
-
1
variant -> { OpenAI::Models::Chat::ChatCompletionToolMessageParam::Content::ChatCompletionContentPartTextArray }
-
-
# @!method self.variants
-
# @return [Array(String, Array<OpenAI::Models::Chat::ChatCompletionContentPartText>)]
-
-
# @type [OpenAI::Internal::Type::Converter]
-
ChatCompletionContentPartTextArray =
-
1
OpenAI::Internal::Type::ArrayOf[-> { OpenAI::Chat::ChatCompletionContentPartText }]
-
end
-
end
-
end
-
-
1
ChatCompletionToolMessageParam = Chat::ChatCompletionToolMessageParam
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Chat
-
1
class ChatCompletionUserMessageParam < OpenAI::Internal::Type::BaseModel
-
# @!attribute content
-
# The contents of the user message.
-
#
-
# @return [String, Array<OpenAI::Models::Chat::ChatCompletionContentPartText, OpenAI::Models::Chat::ChatCompletionContentPartImage, OpenAI::Models::Chat::ChatCompletionContentPartInputAudio, OpenAI::Models::Chat::ChatCompletionContentPart::File>]
-
1
required :content, union: -> { OpenAI::Chat::ChatCompletionUserMessageParam::Content }
-
-
# @!attribute role
-
# The role of the messages author, in this case `user`.
-
#
-
# @return [Symbol, :user]
-
1
required :role, const: :user
-
-
# @!attribute name
-
# An optional name for the participant. Provides the model information to
-
# differentiate between participants of the same role.
-
#
-
# @return [String, nil]
-
1
optional :name, String
-
-
# @!method initialize(content:, name: nil, role: :user)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Chat::ChatCompletionUserMessageParam} for more details.
-
#
-
# Messages sent by an end user, containing prompts or additional context
-
# information.
-
#
-
# @param content [String, Array<OpenAI::Models::Chat::ChatCompletionContentPartText, OpenAI::Models::Chat::ChatCompletionContentPartImage, OpenAI::Models::Chat::ChatCompletionContentPartInputAudio, OpenAI::Models::Chat::ChatCompletionContentPart::File>] The contents of the user message.
-
#
-
# @param name [String] An optional name for the participant. Provides the model information to differen
-
#
-
# @param role [Symbol, :user] The role of the messages author, in this case `user`.
-
-
# The contents of the user message.
-
#
-
# @see OpenAI::Models::Chat::ChatCompletionUserMessageParam#content
-
1
module Content
-
1
extend OpenAI::Internal::Type::Union
-
-
# The text contents of the message.
-
1
variant String
-
-
# An array of content parts with a defined type. Supported options differ based on the [model](https://platform.openai.com/docs/models) being used to generate the response. Can contain text, image, or audio inputs.
-
1
variant -> { OpenAI::Models::Chat::ChatCompletionUserMessageParam::Content::ChatCompletionContentPartArray }
-
-
# @!method self.variants
-
# @return [Array(String, Array<OpenAI::Models::Chat::ChatCompletionContentPartText, OpenAI::Models::Chat::ChatCompletionContentPartImage, OpenAI::Models::Chat::ChatCompletionContentPartInputAudio, OpenAI::Models::Chat::ChatCompletionContentPart::File>)]
-
-
# @type [OpenAI::Internal::Type::Converter]
-
ChatCompletionContentPartArray =
-
1
OpenAI::Internal::Type::ArrayOf[union: -> { OpenAI::Chat::ChatCompletionContentPart }]
-
end
-
end
-
end
-
-
1
ChatCompletionUserMessageParam = Chat::ChatCompletionUserMessageParam
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Chat
-
# @see OpenAI::Resources::Chat::Completions#create
-
#
-
# @see OpenAI::Resources::Chat::Completions#stream_raw
-
1
class CompletionCreateParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!attribute messages
-
# A list of messages comprising the conversation so far. Depending on the
-
# [model](https://platform.openai.com/docs/models) you use, different message
-
# types (modalities) are supported, like
-
# [text](https://platform.openai.com/docs/guides/text-generation),
-
# [images](https://platform.openai.com/docs/guides/vision), and
-
# [audio](https://platform.openai.com/docs/guides/audio).
-
#
-
# @return [Array<OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam, OpenAI::Models::Chat::ChatCompletionSystemMessageParam, OpenAI::Models::Chat::ChatCompletionUserMessageParam, OpenAI::Models::Chat::ChatCompletionAssistantMessageParam, OpenAI::Models::Chat::ChatCompletionToolMessageParam, OpenAI::Models::Chat::ChatCompletionFunctionMessageParam>]
-
1
required :messages,
-
-> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Chat::ChatCompletionMessageParam] }
-
-
# @!attribute model
-
# Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
-
# wide range of models with different capabilities, performance characteristics,
-
# and price points. Refer to the
-
# [model guide](https://platform.openai.com/docs/models) to browse and compare
-
# available models.
-
#
-
# @return [String, Symbol, OpenAI::Models::ChatModel]
-
1
required :model, union: -> { OpenAI::Chat::CompletionCreateParams::Model }
-
-
# @!attribute audio
-
# Parameters for audio output. Required when audio output is requested with
-
# `modalities: ["audio"]`.
-
# [Learn more](https://platform.openai.com/docs/guides/audio).
-
#
-
# @return [OpenAI::Models::Chat::ChatCompletionAudioParam, nil]
-
1
optional :audio, -> { OpenAI::Chat::ChatCompletionAudioParam }, nil?: true
-
-
# @!attribute frequency_penalty
-
# Number between -2.0 and 2.0. Positive values penalize new tokens based on their
-
# existing frequency in the text so far, decreasing the model's likelihood to
-
# repeat the same line verbatim.
-
#
-
# @return [Float, nil]
-
1
optional :frequency_penalty, Float, nil?: true
-
-
# @!attribute function_call
-
# @deprecated
-
#
-
# Deprecated in favor of `tool_choice`.
-
#
-
# Controls which (if any) function is called by the model.
-
#
-
# `none` means the model will not call a function and instead generates a message.
-
#
-
# `auto` means the model can pick between generating a message or calling a
-
# function.
-
#
-
# Specifying a particular function via `{"name": "my_function"}` forces the model
-
# to call that function.
-
#
-
# `none` is the default when no functions are present. `auto` is the default if
-
# functions are present.
-
#
-
# @return [Symbol, OpenAI::Models::Chat::CompletionCreateParams::FunctionCall::FunctionCallMode, OpenAI::Models::Chat::ChatCompletionFunctionCallOption, nil]
-
1
optional :function_call, union: -> { OpenAI::Chat::CompletionCreateParams::FunctionCall }
-
-
# @!attribute functions
-
# @deprecated
-
#
-
# Deprecated in favor of `tools`.
-
#
-
# A list of functions the model may generate JSON inputs for.
-
#
-
# @return [Array<OpenAI::Models::Chat::CompletionCreateParams::Function>, nil]
-
1
optional :functions,
-
-> { OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::CompletionCreateParams::Function] }
-
-
# @!attribute logit_bias
-
# Modify the likelihood of specified tokens appearing in the completion.
-
#
-
# Accepts a JSON object that maps tokens (specified by their token ID in the
-
# tokenizer) to an associated bias value from -100 to 100. Mathematically, the
-
# bias is added to the logits generated by the model prior to sampling. The exact
-
# effect will vary per model, but values between -1 and 1 should decrease or
-
# increase likelihood of selection; values like -100 or 100 should result in a ban
-
# or exclusive selection of the relevant token.
-
#
-
# @return [Hash{Symbol=>Integer}, nil]
-
1
optional :logit_bias, OpenAI::Internal::Type::HashOf[Integer], nil?: true
-
-
# @!attribute logprobs
-
# Whether to return log probabilities of the output tokens or not. If true,
-
# returns the log probabilities of each output token returned in the `content` of
-
# `message`.
-
#
-
# @return [Boolean, nil]
-
1
optional :logprobs, OpenAI::Internal::Type::Boolean, nil?: true
-
-
# @!attribute max_completion_tokens
-
# An upper bound for the number of tokens that can be generated for a completion,
-
# including visible output tokens and
-
# [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
-
#
-
# @return [Integer, nil]
-
1
optional :max_completion_tokens, Integer, nil?: true
-
-
# @!attribute max_tokens
-
# @deprecated
-
#
-
# The maximum number of [tokens](/tokenizer) that can be generated in the chat
-
# completion. This value can be used to control
-
# [costs](https://openai.com/api/pricing/) for text generated via API.
-
#
-
# This value is now deprecated in favor of `max_completion_tokens`, and is not
-
# compatible with
-
# [o-series models](https://platform.openai.com/docs/guides/reasoning).
-
#
-
# @return [Integer, nil]
-
1
optional :max_tokens, Integer, nil?: true
-
-
# @!attribute metadata
-
# Set of 16 key-value pairs that can be attached to an object. This can be useful
-
# for storing additional information about the object in a structured format, and
-
# querying for objects via API or the dashboard.
-
#
-
# Keys are strings with a maximum length of 64 characters. Values are strings with
-
# a maximum length of 512 characters.
-
#
-
# @return [Hash{Symbol=>String}, nil]
-
1
optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
-
-
# @!attribute modalities
-
# Output types that you would like the model to generate. Most models are capable
-
# of generating text, which is the default:
-
#
-
# `["text"]`
-
#
-
# The `gpt-4o-audio-preview` model can also be used to
-
# [generate audio](https://platform.openai.com/docs/guides/audio). To request that
-
# this model generate both text and audio responses, you can use:
-
#
-
# `["text", "audio"]`
-
#
-
# @return [Array<Symbol, OpenAI::Models::Chat::CompletionCreateParams::Modality>, nil]
-
1
optional :modalities,
-
-> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Chat::CompletionCreateParams::Modality] },
-
nil?: true
-
-
# @!attribute n
-
# How many chat completion choices to generate for each input message. Note that
-
# you will be charged based on the number of generated tokens across all of the
-
# choices. Keep `n` as `1` to minimize costs.
-
#
-
# @return [Integer, nil]
-
1
optional :n, Integer, nil?: true
-
-
# @!attribute parallel_tool_calls
-
# Whether to enable
-
# [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling)
-
# during tool use.
-
#
-
# @return [Boolean, nil]
-
1
optional :parallel_tool_calls, OpenAI::Internal::Type::Boolean
-
-
# @!attribute prediction
-
# Static predicted output content, such as the content of a text file that is
-
# being regenerated.
-
#
-
# @return [OpenAI::Models::Chat::ChatCompletionPredictionContent, nil]
-
1
optional :prediction, -> { OpenAI::Chat::ChatCompletionPredictionContent }, nil?: true
-
-
# @!attribute presence_penalty
-
# Number between -2.0 and 2.0. Positive values penalize new tokens based on
-
# whether they appear in the text so far, increasing the model's likelihood to
-
# talk about new topics.
-
#
-
# @return [Float, nil]
-
1
optional :presence_penalty, Float, nil?: true
-
-
# @!attribute prompt_cache_key
-
# Used by OpenAI to cache responses for similar requests to optimize your cache
-
# hit rates. Replaces the `user` field.
-
# [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
-
#
-
# @return [String, nil]
-
1
optional :prompt_cache_key, String
-
-
# @!attribute reasoning_effort
-
# **o-series models only**
-
#
-
# Constrains effort on reasoning for
-
# [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-
# supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
-
# result in faster responses and fewer tokens used on reasoning in a response.
-
#
-
# @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
-
1
optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
-
-
# @!attribute response_format
-
# An object specifying the format that the model must output.
-
#
-
# Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
-
# Outputs which ensures the model will match your supplied JSON schema. Learn more
-
# in the
-
# [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
-
#
-
# Setting to `{ "type": "json_object" }` enables the older JSON mode, which
-
# ensures the message the model generates is valid JSON. Using `json_schema` is
-
# preferred for models that support it.
-
#
-
# @return [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::StructuredOutput::JsonSchemaConverter, OpenAI::Models::ResponseFormatJSONObject, nil]
-
1
optional :response_format, union: -> { OpenAI::Chat::CompletionCreateParams::ResponseFormat }
-
-
# @!attribute safety_identifier
-
# A stable identifier used to help detect users of your application that may be
-
# violating OpenAI's usage policies. The IDs should be a string that uniquely
-
# identifies each user. We recommend hashing their username or email address, in
-
# order to avoid sending us any identifying information.
-
# [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
-
#
-
# @return [String, nil]
-
1
optional :safety_identifier, String
-
-
# @!attribute seed
-
# This feature is in Beta. If specified, our system will make a best effort to
-
# sample deterministically, such that repeated requests with the same `seed` and
-
# parameters should return the same result. Determinism is not guaranteed, and you
-
# should refer to the `system_fingerprint` response parameter to monitor changes
-
# in the backend.
-
#
-
# @return [Integer, nil]
-
1
optional :seed, Integer, nil?: true
-
-
# @!attribute service_tier
-
# Specifies the processing type used for serving the request.
-
#
-
# - If set to 'auto', then the request will be processed with the service tier
-
# configured in the Project settings. Unless otherwise configured, the Project
-
# will use 'default'.
-
# - If set to 'default', then the request will be processed with the standard
-
# pricing and performance for the selected model.
-
# - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
-
# 'priority', then the request will be processed with the corresponding service
-
# tier. [Contact sales](https://openai.com/contact-sales) to learn more about
-
# Priority processing.
-
# - When not set, the default behavior is 'auto'.
-
#
-
# When the `service_tier` parameter is set, the response body will include the
-
# `service_tier` value based on the processing mode actually used to serve the
-
# request. This response value may be different from the value set in the
-
# parameter.
-
#
-
# @return [Symbol, OpenAI::Models::Chat::CompletionCreateParams::ServiceTier, nil]
-
1
optional :service_tier, enum: -> { OpenAI::Chat::CompletionCreateParams::ServiceTier }, nil?: true
-
-
# @!attribute stop
-
# Not supported with latest reasoning models `o3` and `o4-mini`.
-
#
-
# Up to 4 sequences where the API will stop generating further tokens. The
-
# returned text will not contain the stop sequence.
-
#
-
# @return [String, Array<String>, nil]
-
1
optional :stop, union: -> { OpenAI::Chat::CompletionCreateParams::Stop }, nil?: true
-
-
# @!attribute store
-
# Whether or not to store the output of this chat completion request for use in
-
# our [model distillation](https://platform.openai.com/docs/guides/distillation)
-
# or [evals](https://platform.openai.com/docs/guides/evals) products.
-
#
-
# Supports text and image inputs. Note: image inputs over 10MB will be dropped.
-
#
-
# @return [Boolean, nil]
-
1
optional :store, OpenAI::Internal::Type::Boolean, nil?: true
-
-
# @!attribute stream_options
-
# Options for streaming response. Only set this when you set `stream: true`.
-
#
-
# @return [OpenAI::Models::Chat::ChatCompletionStreamOptions, nil]
-
1
optional :stream_options, -> { OpenAI::Chat::ChatCompletionStreamOptions }, nil?: true
-
-
# @!attribute temperature
-
# What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
-
# make the output more random, while lower values like 0.2 will make it more
-
# focused and deterministic. We generally recommend altering this or `top_p` but
-
# not both.
-
#
-
# @return [Float, nil]
-
1
optional :temperature, Float, nil?: true
-
-
# @!attribute tool_choice
-
# Controls which (if any) tool is called by the model. `none` means the model will
-
# not call any tool and instead generates a message. `auto` means the model can
-
# pick between generating a message or calling one or more tools. `required` means
-
# the model must call one or more tools. Specifying a particular tool via
-
# `{"type": "function", "function": {"name": "my_function"}}` forces the model to
-
# call that tool.
-
#
-
# `none` is the default when no tools are present. `auto` is the default if tools
-
# are present.
-
#
-
# @return [Symbol, OpenAI::Models::Chat::ChatCompletionToolChoiceOption::Auto, OpenAI::Models::Chat::ChatCompletionNamedToolChoice, nil]
-
1
optional :tool_choice, union: -> { OpenAI::Chat::ChatCompletionToolChoiceOption }
-
-
# @!attribute tools
-
# A list of tools the model may call. Currently, only functions are supported as a
-
# tool. Use this to provide a list of functions the model may generate JSON inputs
-
# for. A max of 128 functions are supported.
-
#
-
# @return [Array<OpenAI::Models::Chat::ChatCompletionTool, OpenAI::StructuredOutput::JsonSchemaConverter>, nil]
-
1
optional :tools,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[union: OpenAI::UnionOf[
-
OpenAI::Chat::ChatCompletionTool, OpenAI::StructuredOutput::JsonSchemaConverter
-
]]
-
}
-
-
# @!attribute top_logprobs
-
# An integer between 0 and 20 specifying the number of most likely tokens to
-
# return at each token position, each with an associated log probability.
-
# `logprobs` must be set to `true` if this parameter is used.
-
#
-
# @return [Integer, nil]
-
1
optional :top_logprobs, Integer, nil?: true
-
-
# @!attribute top_p
-
# An alternative to sampling with temperature, called nucleus sampling, where the
-
# model considers the results of the tokens with top_p probability mass. So 0.1
-
# means only the tokens comprising the top 10% probability mass are considered.
-
#
-
# We generally recommend altering this or `temperature` but not both.
-
#
-
# @return [Float, nil]
-
1
optional :top_p, Float, nil?: true
-
-
# @!attribute user
-
# @deprecated
-
#
-
# This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
-
# `prompt_cache_key` instead to maintain caching optimizations. A stable
-
# identifier for your end-users. Used to boost cache hit rates by better bucketing
-
# similar requests and to help OpenAI detect and prevent abuse.
-
# [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
-
#
-
# @return [String, nil]
-
1
optional :user, String
-
-
# @!attribute web_search_options
-
# This tool searches the web for relevant results to use in a response. Learn more
-
# about the
-
# [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
-
#
-
# @return [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions, nil]
-
1
optional :web_search_options, -> { OpenAI::Chat::CompletionCreateParams::WebSearchOptions }
-
-
# @!method initialize(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, web_search_options: nil, request_options: {})
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Chat::CompletionCreateParams} for more details.
-
#
-
# @param messages [Array<OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam, OpenAI::Models::Chat::ChatCompletionSystemMessageParam, OpenAI::Models::Chat::ChatCompletionUserMessageParam, OpenAI::Models::Chat::ChatCompletionAssistantMessageParam, OpenAI::Models::Chat::ChatCompletionToolMessageParam, OpenAI::Models::Chat::ChatCompletionFunctionMessageParam>] A list of messages comprising the conversation so far. Depending on the
-
#
-
# @param model [String, Symbol, OpenAI::Models::ChatModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
-
#
-
# @param audio [OpenAI::Models::Chat::ChatCompletionAudioParam, nil] Parameters for audio output. Required when audio output is requested with
-
#
-
# @param frequency_penalty [Float, nil] Number between -2.0 and 2.0. Positive values penalize new tokens based on
-
#
-
# @param function_call [Symbol, OpenAI::Models::Chat::CompletionCreateParams::FunctionCall::FunctionCallMode, OpenAI::Models::Chat::ChatCompletionFunctionCallOption] Deprecated in favor of `tool_choice`.
-
#
-
# @param functions [Array<OpenAI::Models::Chat::CompletionCreateParams::Function>] Deprecated in favor of `tools`.
-
#
-
# @param logit_bias [Hash{Symbol=>Integer}, nil] Modify the likelihood of specified tokens appearing in the completion.
-
#
-
# @param logprobs [Boolean, nil] Whether to return log probabilities of the output tokens or not. If true,
-
#
-
# @param max_completion_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a completion,
-
#
-
# @param max_tokens [Integer, nil] The maximum number of [tokens](/tokenizer) that can be generated in the
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
#
-
# @param modalities [Array<Symbol, OpenAI::Models::Chat::CompletionCreateParams::Modality>, nil] Output types that you would like the model to generate.
-
#
-
# @param n [Integer, nil] How many chat completion choices to generate for each input message. Note that y
-
#
-
# @param parallel_tool_calls [Boolean] Whether to enable [parallel function calling](https://platform.openai.com/docs/g
-
#
-
# @param prediction [OpenAI::Models::Chat::ChatCompletionPredictionContent, nil] Static predicted output content, such as the content of a text file that is
-
#
-
# @param presence_penalty [Float, nil] Number between -2.0 and 2.0. Positive values penalize new tokens based on
-
#
-
# @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi
-
#
-
# @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] **o-series models only**
-
#
-
# @param response_format [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::StructuredOutput::JsonSchemaConverter, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output.
-
#
-
# @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi
-
#
-
# @param seed [Integer, nil] This feature is in Beta.
-
#
-
# @param service_tier [Symbol, OpenAI::Models::Chat::CompletionCreateParams::ServiceTier, nil] Specifies the processing type used for serving the request.
-
#
-
# @param stop [String, Array<String>, nil] Not supported with latest reasoning models `o3` and `o4-mini`.
-
#
-
# @param store [Boolean, nil] Whether or not to store the output of this chat completion request for
-
#
-
# @param stream_options [OpenAI::Models::Chat::ChatCompletionStreamOptions, nil] Options for streaming response. Only set this when you set `stream: true`.
-
#
-
# @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
-
#
-
# @param tool_choice [Symbol, OpenAI::Models::Chat::ChatCompletionToolChoiceOption::Auto, OpenAI::Models::Chat::ChatCompletionNamedToolChoice] Controls which (if any) tool is called by the model.
-
#
-
# @param tools [Array<OpenAI::Models::Chat::ChatCompletionTool, OpenAI::StructuredOutput::JsonSchemaConverter>] A list of tools the model may call. Currently, only functions are supported as a
-
#
-
# @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to
-
#
-
# @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling,
-
#
-
# @param user [String] This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
-
#
-
# @param web_search_options [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions] This tool searches the web for relevant results to use in a response.
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
-
# Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
-
# wide range of models with different capabilities, performance characteristics,
-
# and price points. Refer to the
-
# [model guide](https://platform.openai.com/docs/models) to browse and compare
-
# available models.
-
1
module Model
-
1
extend OpenAI::Internal::Type::Union
-
-
1
variant String
-
-
# Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
-
# offers a wide range of models with different capabilities, performance
-
# characteristics, and price points. Refer to the [model guide](https://platform.openai.com/docs/models)
-
# to browse and compare available models.
-
1
variant enum: -> { OpenAI::ChatModel }
-
-
# @!method self.variants
-
# @return [Array(String, Symbol, OpenAI::Models::ChatModel)]
-
end
-
-
# @deprecated
-
#
-
# Deprecated in favor of `tool_choice`.
-
#
-
# Controls which (if any) function is called by the model.
-
#
-
# `none` means the model will not call a function and instead generates a message.
-
#
-
# `auto` means the model can pick between generating a message or calling a
-
# function.
-
#
-
# Specifying a particular function via `{"name": "my_function"}` forces the model
-
# to call that function.
-
#
-
# `none` is the default when no functions are present. `auto` is the default if
-
# functions are present.
-
1
module FunctionCall
-
1
extend OpenAI::Internal::Type::Union
-
-
# `none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function.
-
1
variant enum: -> { OpenAI::Chat::CompletionCreateParams::FunctionCall::FunctionCallMode }
-
-
# Specifying a particular function via `{"name": "my_function"}` forces the model to call that function.
-
1
variant -> { OpenAI::Chat::ChatCompletionFunctionCallOption }
-
-
# `none` means the model will not call a function and instead generates a message.
-
# `auto` means the model can pick between generating a message or calling a
-
# function.
-
1
module FunctionCallMode
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
NONE = :none
-
1
AUTO = :auto
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
# @!method self.variants
-
# @return [Array(Symbol, OpenAI::Models::Chat::CompletionCreateParams::FunctionCall::FunctionCallMode, OpenAI::Models::Chat::ChatCompletionFunctionCallOption)]
-
end
-
-
# @deprecated
-
1
class Function < OpenAI::Internal::Type::BaseModel
-
# @!attribute name
-
# The name of the function to be called. Must be a-z, A-Z, 0-9, or contain
-
# underscores and dashes, with a maximum length of 64.
-
#
-
# @return [String]
-
1
required :name, String
-
-
# @!attribute description
-
# A description of what the function does, used by the model to choose when and
-
# how to call the function.
-
#
-
# @return [String, nil]
-
1
optional :description, String
-
-
# @!attribute parameters
-
# The parameters the functions accepts, described as a JSON Schema object. See the
-
# [guide](https://platform.openai.com/docs/guides/function-calling) for examples,
-
# and the
-
# [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
-
# documentation about the format.
-
#
-
# Omitting `parameters` defines a function with an empty parameter list.
-
#
-
# @return [Hash{Symbol=>Object}, nil]
-
1
optional :parameters, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]
-
-
# @!method initialize(name:, description: nil, parameters: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Chat::CompletionCreateParams::Function} for more details.
-
#
-
# @param name [String] The name of the function to be called. Must be a-z, A-Z, 0-9, or contain undersc
-
#
-
# @param description [String] A description of what the function does, used by the model to choose when and ho
-
#
-
# @param parameters [Hash{Symbol=>Object}] The parameters the functions accepts, described as a JSON Schema object. See the
-
end
-
-
1
module Modality
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
TEXT = :text
-
1
AUDIO = :audio
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
# An object specifying the format that the model must output.
-
#
-
# Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
-
# Outputs which ensures the model will match your supplied JSON schema. Learn more
-
# in the
-
# [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
-
#
-
# Setting to `{ "type": "json_object" }` enables the older JSON mode, which
-
# ensures the message the model generates is valid JSON. Using `json_schema` is
-
# preferred for models that support it.
-
1
module ResponseFormat
-
1
extend OpenAI::Internal::Type::Union
-
-
# Default response format. Used to generate text responses.
-
1
variant -> { OpenAI::ResponseFormatText }
-
-
# JSON Schema response format. Used to generate structured JSON responses.
-
# Learn more about [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs).
-
1
variant -> { OpenAI::ResponseFormatJSONSchema }
-
-
# An {OpenAI::BaseModel} can be provided and implicitly converted into {OpenAI::Models::ResponseFormatJSONSchema}.
-
# See examples for more details.
-
#
-
# Learn more about [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs).
-
1
variant -> { OpenAI::StructuredOutput::JsonSchemaConverter }
-
-
# JSON object response format. An older method of generating JSON responses.
-
# Using `json_schema` is recommended for models that support it. Note that the
-
# model will not generate JSON without a system or user message instructing it
-
# to do so.
-
1
variant -> { OpenAI::ResponseFormatJSONObject }
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::Models::ResponseFormatJSONObject)]
-
end
-
-
# Specifies the processing type used for serving the request.
-
#
-
# - If set to 'auto', then the request will be processed with the service tier
-
# configured in the Project settings. Unless otherwise configured, the Project
-
# will use 'default'.
-
# - If set to 'default', then the request will be processed with the standard
-
# pricing and performance for the selected model.
-
# - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
-
# 'priority', then the request will be processed with the corresponding service
-
# tier. [Contact sales](https://openai.com/contact-sales) to learn more about
-
# Priority processing.
-
# - When not set, the default behavior is 'auto'.
-
#
-
# When the `service_tier` parameter is set, the response body will include the
-
# `service_tier` value based on the processing mode actually used to serve the
-
# request. This response value may be different from the value set in the
-
# parameter.
-
1
module ServiceTier
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
AUTO = :auto
-
1
DEFAULT = :default
-
1
FLEX = :flex
-
1
SCALE = :scale
-
1
PRIORITY = :priority
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
# Not supported with latest reasoning models `o3` and `o4-mini`.
-
#
-
# Up to 4 sequences where the API will stop generating further tokens. The
-
# returned text will not contain the stop sequence.
-
1
module Stop
-
1
extend OpenAI::Internal::Type::Union
-
-
1
variant String
-
-
1
variant -> { OpenAI::Models::Chat::CompletionCreateParams::Stop::StringArray }
-
-
# @!method self.variants
-
# @return [Array(String, Array<String>)]
-
-
# @type [OpenAI::Internal::Type::Converter]
-
1
StringArray = OpenAI::Internal::Type::ArrayOf[String]
-
end
-
-
1
class WebSearchOptions < OpenAI::Internal::Type::BaseModel
-
# @!attribute search_context_size
-
# High level guidance for the amount of context window space to use for the
-
# search. One of `low`, `medium`, or `high`. `medium` is the default.
-
#
-
# @return [Symbol, OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::SearchContextSize, nil]
-
1
optional :search_context_size,
-
enum: -> { OpenAI::Chat::CompletionCreateParams::WebSearchOptions::SearchContextSize }
-
-
# @!attribute user_location
-
# Approximate location parameters for the search.
-
#
-
# @return [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation, nil]
-
1
optional :user_location,
-
-> { OpenAI::Chat::CompletionCreateParams::WebSearchOptions::UserLocation },
-
nil?: true
-
-
# @!method initialize(search_context_size: nil, user_location: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions} for more
-
# details.
-
#
-
# This tool searches the web for relevant results to use in a response. Learn more
-
# about the
-
# [web search tool](https://platform.openai.com/docs/guides/tools-web-search?api-mode=chat).
-
#
-
# @param search_context_size [Symbol, OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::SearchContextSize] High level guidance for the amount of context window space to use for the
-
#
-
# @param user_location [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation, nil] Approximate location parameters for the search.
-
-
# High level guidance for the amount of context window space to use for the
-
# search. One of `low`, `medium`, or `high`. `medium` is the default.
-
#
-
# @see OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions#search_context_size
-
1
module SearchContextSize
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
LOW = :low
-
1
MEDIUM = :medium
-
1
HIGH = :high
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
# @see OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions#user_location
-
1
class UserLocation < OpenAI::Internal::Type::BaseModel
-
# @!attribute approximate
-
# Approximate location parameters for the search.
-
#
-
# @return [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation::Approximate]
-
1
required :approximate,
-
-> { OpenAI::Chat::CompletionCreateParams::WebSearchOptions::UserLocation::Approximate }
-
-
# @!attribute type
-
# The type of location approximation. Always `approximate`.
-
#
-
# @return [Symbol, :approximate]
-
1
required :type, const: :approximate
-
-
# @!method initialize(approximate:, type: :approximate)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation}
-
# for more details.
-
#
-
# Approximate location parameters for the search.
-
#
-
# @param approximate [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation::Approximate] Approximate location parameters for the search.
-
#
-
# @param type [Symbol, :approximate] The type of location approximation. Always `approximate`.
-
-
# @see OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation#approximate
-
1
class Approximate < OpenAI::Internal::Type::BaseModel
-
# @!attribute city
-
# Free text input for the city of the user, e.g. `San Francisco`.
-
#
-
# @return [String, nil]
-
1
optional :city, String
-
-
# @!attribute country
-
# The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of
-
# the user, e.g. `US`.
-
#
-
# @return [String, nil]
-
1
optional :country, String
-
-
# @!attribute region
-
# Free text input for the region of the user, e.g. `California`.
-
#
-
# @return [String, nil]
-
1
optional :region, String
-
-
# @!attribute timezone
-
# The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the
-
# user, e.g. `America/Los_Angeles`.
-
#
-
# @return [String, nil]
-
1
optional :timezone, String
-
-
# @!method initialize(city: nil, country: nil, region: nil, timezone: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions::UserLocation::Approximate}
-
# for more details.
-
#
-
# Approximate location parameters for the search.
-
#
-
# @param city [String] Free text input for the city of the user, e.g. `San Francisco`.
-
#
-
# @param country [String] The two-letter
-
#
-
# @param region [String] Free text input for the region of the user, e.g. `California`.
-
#
-
# @param timezone [String] The [IANA timezone](https://timeapi.io/documentation/iana-timezones)
-
end
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Chat
-
# @see OpenAI::Resources::Chat::Completions#delete
-
1
class CompletionDeleteParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!method initialize(request_options: {})
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Chat
-
# @see OpenAI::Resources::Chat::Completions#list
-
1
class CompletionListParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!attribute after
-
# Identifier for the last chat completion from the previous pagination request.
-
#
-
# @return [String, nil]
-
1
optional :after, String
-
-
# @!attribute limit
-
# Number of Chat Completions to retrieve.
-
#
-
# @return [Integer, nil]
-
1
optional :limit, Integer
-
-
# @!attribute metadata
-
# A list of metadata keys to filter the Chat Completions by. Example:
-
#
-
# `metadata[key1]=value1&metadata[key2]=value2`
-
#
-
# @return [Hash{Symbol=>String}, nil]
-
1
optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
-
-
# @!attribute model
-
# The model used to generate the Chat Completions.
-
#
-
# @return [String, nil]
-
1
optional :model, String
-
-
# @!attribute order
-
# Sort order for Chat Completions by timestamp. Use `asc` for ascending order or
-
# `desc` for descending order. Defaults to `asc`.
-
#
-
# @return [Symbol, OpenAI::Models::Chat::CompletionListParams::Order, nil]
-
1
optional :order, enum: -> { OpenAI::Chat::CompletionListParams::Order }
-
-
# @!method initialize(after: nil, limit: nil, metadata: nil, model: nil, order: nil, request_options: {})
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Chat::CompletionListParams} for more details.
-
#
-
# @param after [String] Identifier for the last chat completion from the previous pagination request.
-
#
-
# @param limit [Integer] Number of Chat Completions to retrieve.
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] A list of metadata keys to filter the Chat Completions by. Example:
-
#
-
# @param model [String] The model used to generate the Chat Completions.
-
#
-
# @param order [Symbol, OpenAI::Models::Chat::CompletionListParams::Order] Sort order for Chat Completions by timestamp. Use `asc` for ascending order or `
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
-
# Sort order for Chat Completions by timestamp. Use `asc` for ascending order or
-
# `desc` for descending order. Defaults to `asc`.
-
1
module Order
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
ASC = :asc
-
1
DESC = :desc
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Chat
-
# @see OpenAI::Resources::Chat::Completions#retrieve
-
1
class CompletionRetrieveParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!method initialize(request_options: {})
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Chat
-
# @see OpenAI::Resources::Chat::Completions#update
-
1
class CompletionUpdateParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!attribute metadata
-
# Set of 16 key-value pairs that can be attached to an object. This can be useful
-
# for storing additional information about the object in a structured format, and
-
# querying for objects via API or the dashboard.
-
#
-
# Keys are strings with a maximum length of 64 characters. Values are strings with
-
# a maximum length of 512 characters.
-
#
-
# @return [Hash{Symbol=>String}, nil]
-
1
required :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
-
-
# @!method initialize(metadata:, request_options: {})
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Chat::CompletionUpdateParams} for more details.
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Chat
-
1
module Completions
-
# @see OpenAI::Resources::Chat::Completions::Messages#list
-
1
class MessageListParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!attribute after
-
# Identifier for the last message from the previous pagination request.
-
#
-
# @return [String, nil]
-
1
optional :after, String
-
-
# @!attribute limit
-
# Number of messages to retrieve.
-
#
-
# @return [Integer, nil]
-
1
optional :limit, Integer
-
-
# @!attribute order
-
# Sort order for messages by timestamp. Use `asc` for ascending order or `desc`
-
# for descending order. Defaults to `asc`.
-
#
-
# @return [Symbol, OpenAI::Models::Chat::Completions::MessageListParams::Order, nil]
-
1
optional :order, enum: -> { OpenAI::Chat::Completions::MessageListParams::Order }
-
-
# @!method initialize(after: nil, limit: nil, order: nil, request_options: {})
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Chat::Completions::MessageListParams} for more details.
-
#
-
# @param after [String] Identifier for the last message from the previous pagination request.
-
#
-
# @param limit [Integer] Number of messages to retrieve.
-
#
-
# @param order [Symbol, OpenAI::Models::Chat::Completions::MessageListParams::Order] Sort order for messages by timestamp. Use `asc` for ascending order or `desc` fo
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
-
# Sort order for messages by timestamp. Use `asc` for ascending order or `desc`
-
# for descending order. Defaults to `asc`.
-
1
module Order
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
ASC = :asc
-
1
DESC = :desc
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module ChatModel
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
GPT_4_1 = :"gpt-4.1"
-
1
GPT_4_1_MINI = :"gpt-4.1-mini"
-
1
GPT_4_1_NANO = :"gpt-4.1-nano"
-
1
GPT_4_1_2025_04_14 = :"gpt-4.1-2025-04-14"
-
1
GPT_4_1_MINI_2025_04_14 = :"gpt-4.1-mini-2025-04-14"
-
1
GPT_4_1_NANO_2025_04_14 = :"gpt-4.1-nano-2025-04-14"
-
1
O4_MINI = :"o4-mini"
-
1
O4_MINI_2025_04_16 = :"o4-mini-2025-04-16"
-
1
O3 = :o3
-
1
O3_2025_04_16 = :"o3-2025-04-16"
-
1
O3_MINI = :"o3-mini"
-
1
O3_MINI_2025_01_31 = :"o3-mini-2025-01-31"
-
1
O1 = :o1
-
1
O1_2024_12_17 = :"o1-2024-12-17"
-
1
O1_PREVIEW = :"o1-preview"
-
1
O1_PREVIEW_2024_09_12 = :"o1-preview-2024-09-12"
-
1
O1_MINI = :"o1-mini"
-
1
O1_MINI_2024_09_12 = :"o1-mini-2024-09-12"
-
1
GPT_4O = :"gpt-4o"
-
1
GPT_4O_2024_11_20 = :"gpt-4o-2024-11-20"
-
1
GPT_4O_2024_08_06 = :"gpt-4o-2024-08-06"
-
1
GPT_4O_2024_05_13 = :"gpt-4o-2024-05-13"
-
1
GPT_4O_AUDIO_PREVIEW = :"gpt-4o-audio-preview"
-
1
GPT_4O_AUDIO_PREVIEW_2024_10_01 = :"gpt-4o-audio-preview-2024-10-01"
-
1
GPT_4O_AUDIO_PREVIEW_2024_12_17 = :"gpt-4o-audio-preview-2024-12-17"
-
1
GPT_4O_AUDIO_PREVIEW_2025_06_03 = :"gpt-4o-audio-preview-2025-06-03"
-
1
GPT_4O_MINI_AUDIO_PREVIEW = :"gpt-4o-mini-audio-preview"
-
1
GPT_4O_MINI_AUDIO_PREVIEW_2024_12_17 = :"gpt-4o-mini-audio-preview-2024-12-17"
-
1
GPT_4O_SEARCH_PREVIEW = :"gpt-4o-search-preview"
-
1
GPT_4O_MINI_SEARCH_PREVIEW = :"gpt-4o-mini-search-preview"
-
1
GPT_4O_SEARCH_PREVIEW_2025_03_11 = :"gpt-4o-search-preview-2025-03-11"
-
1
GPT_4O_MINI_SEARCH_PREVIEW_2025_03_11 = :"gpt-4o-mini-search-preview-2025-03-11"
-
1
CHATGPT_4O_LATEST = :"chatgpt-4o-latest"
-
1
CODEX_MINI_LATEST = :"codex-mini-latest"
-
1
GPT_4O_MINI = :"gpt-4o-mini"
-
1
GPT_4O_MINI_2024_07_18 = :"gpt-4o-mini-2024-07-18"
-
1
GPT_4_TURBO = :"gpt-4-turbo"
-
1
GPT_4_TURBO_2024_04_09 = :"gpt-4-turbo-2024-04-09"
-
1
GPT_4_0125_PREVIEW = :"gpt-4-0125-preview"
-
1
GPT_4_TURBO_PREVIEW = :"gpt-4-turbo-preview"
-
1
GPT_4_1106_PREVIEW = :"gpt-4-1106-preview"
-
1
GPT_4_VISION_PREVIEW = :"gpt-4-vision-preview"
-
1
GPT_4 = :"gpt-4"
-
1
GPT_4_0314 = :"gpt-4-0314"
-
1
GPT_4_0613 = :"gpt-4-0613"
-
1
GPT_4_32K = :"gpt-4-32k"
-
1
GPT_4_32K_0314 = :"gpt-4-32k-0314"
-
1
GPT_4_32K_0613 = :"gpt-4-32k-0613"
-
1
GPT_3_5_TURBO = :"gpt-3.5-turbo"
-
1
GPT_3_5_TURBO_16K = :"gpt-3.5-turbo-16k"
-
1
GPT_3_5_TURBO_0301 = :"gpt-3.5-turbo-0301"
-
1
GPT_3_5_TURBO_0613 = :"gpt-3.5-turbo-0613"
-
1
GPT_3_5_TURBO_1106 = :"gpt-3.5-turbo-1106"
-
1
GPT_3_5_TURBO_0125 = :"gpt-3.5-turbo-0125"
-
1
GPT_3_5_TURBO_16K_0613 = :"gpt-3.5-turbo-16k-0613"
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
class ComparisonFilter < OpenAI::Internal::Type::BaseModel
-
# @!attribute key
-
# The key to compare against the value.
-
#
-
# @return [String]
-
1
required :key, String
-
-
# @!attribute type
-
# Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`.
-
#
-
# - `eq`: equals
-
# - `ne`: not equal
-
# - `gt`: greater than
-
# - `gte`: greater than or equal
-
# - `lt`: less than
-
# - `lte`: less than or equal
-
#
-
# @return [Symbol, OpenAI::Models::ComparisonFilter::Type]
-
1
required :type, enum: -> { OpenAI::ComparisonFilter::Type }
-
-
# @!attribute value
-
# The value to compare against the attribute key; supports string, number, or
-
# boolean types.
-
#
-
# @return [String, Float, Boolean]
-
1
required :value, union: -> { OpenAI::ComparisonFilter::Value }
-
-
# @!method initialize(key:, type:, value:)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::ComparisonFilter} for more details.
-
#
-
# A filter used to compare a specified attribute key to a given value using a
-
# defined comparison operation.
-
#
-
# @param key [String] The key to compare against the value.
-
#
-
# @param type [Symbol, OpenAI::Models::ComparisonFilter::Type] Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`.
-
#
-
# @param value [String, Float, Boolean] The value to compare against the attribute key; supports string, number, or bool
-
-
# Specifies the comparison operator: `eq`, `ne`, `gt`, `gte`, `lt`, `lte`.
-
#
-
# - `eq`: equals
-
# - `ne`: not equal
-
# - `gt`: greater than
-
# - `gte`: greater than or equal
-
# - `lt`: less than
-
# - `lte`: less than or equal
-
#
-
# @see OpenAI::Models::ComparisonFilter#type
-
1
module Type
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
EQ = :eq
-
1
NE = :ne
-
1
GT = :gt
-
1
GTE = :gte
-
1
LT = :lt
-
1
LTE = :lte
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
# The value to compare against the attribute key; supports string, number, or
-
# boolean types.
-
#
-
# @see OpenAI::Models::ComparisonFilter#value
-
1
module Value
-
1
extend OpenAI::Internal::Type::Union
-
-
1
variant String
-
-
1
variant Float
-
-
1
variant OpenAI::Internal::Type::Boolean
-
-
# @!method self.variants
-
# @return [Array(String, Float, Boolean)]
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# @see OpenAI::Resources::Completions#create
-
#
-
# @see OpenAI::Resources::Completions#create_streaming
-
1
class Completion < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# A unique identifier for the completion.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute choices
-
# The list of completion choices the model generated for the input prompt.
-
#
-
# @return [Array<OpenAI::Models::CompletionChoice>]
-
1
required :choices, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::CompletionChoice] }
-
-
# @!attribute created
-
# The Unix timestamp (in seconds) of when the completion was created.
-
#
-
# @return [Integer]
-
1
required :created, Integer
-
-
# @!attribute model
-
# The model used for completion.
-
#
-
# @return [String]
-
1
required :model, String
-
-
# @!attribute object
-
# The object type, which is always "text_completion"
-
#
-
# @return [Symbol, :text_completion]
-
1
required :object, const: :text_completion
-
-
# @!attribute system_fingerprint
-
# This fingerprint represents the backend configuration that the model runs with.
-
#
-
# Can be used in conjunction with the `seed` request parameter to understand when
-
# backend changes have been made that might impact determinism.
-
#
-
# @return [String, nil]
-
1
optional :system_fingerprint, String
-
-
# @!attribute usage
-
# Usage statistics for the completion request.
-
#
-
# @return [OpenAI::Models::CompletionUsage, nil]
-
1
optional :usage, -> { OpenAI::CompletionUsage }
-
-
# @!method initialize(id:, choices:, created:, model:, system_fingerprint: nil, usage: nil, object: :text_completion)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Completion} for more details.
-
#
-
# Represents a completion response from the API. Note: both the streamed and
-
# non-streamed response objects share the same shape (unlike the chat endpoint).
-
#
-
# @param id [String] A unique identifier for the completion.
-
#
-
# @param choices [Array<OpenAI::Models::CompletionChoice>] The list of completion choices the model generated for the input prompt.
-
#
-
# @param created [Integer] The Unix timestamp (in seconds) of when the completion was created.
-
#
-
# @param model [String] The model used for completion.
-
#
-
# @param system_fingerprint [String] This fingerprint represents the backend configuration that the model runs with.
-
#
-
# @param usage [OpenAI::Models::CompletionUsage] Usage statistics for the completion request.
-
#
-
# @param object [Symbol, :text_completion] The object type, which is always "text_completion"
-
end
-
end
-
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    # A single generated choice within a legacy text-completion response
    # (carried in the `choices` array of {OpenAI::Models::Completion}).
    class CompletionChoice < OpenAI::Internal::Type::BaseModel
      # @!attribute finish_reason
      #   The reason the model stopped generating tokens. This will be `stop` if the model
      #   hit a natural stop point or a provided stop sequence, `length` if the maximum
      #   number of tokens specified in the request was reached, or `content_filter` if
      #   content was omitted due to a flag from our content filters.
      #
      #   @return [Symbol, OpenAI::Models::CompletionChoice::FinishReason]
      required :finish_reason, enum: -> { OpenAI::CompletionChoice::FinishReason }

      # @!attribute index
      #
      #   @return [Integer]
      required :index, Integer

      # @!attribute logprobs
      #
      #   @return [OpenAI::Models::CompletionChoice::Logprobs, nil]
      required :logprobs, -> { OpenAI::CompletionChoice::Logprobs }, nil?: true

      # @!attribute text
      #
      #   @return [String]
      required :text, String

      # @!method initialize(finish_reason:, index:, logprobs:, text:)
      #   Some parameter documentation has been truncated, see
      #   {OpenAI::Models::CompletionChoice} for more details.
      #
      #   @param finish_reason [Symbol, OpenAI::Models::CompletionChoice::FinishReason] The reason the model stopped generating tokens. This will be `stop` if the model
      #
      #   @param index [Integer]
      #
      #   @param logprobs [OpenAI::Models::CompletionChoice::Logprobs, nil]
      #
      #   @param text [String]

      # The reason the model stopped generating tokens. This will be `stop` if the model
      # hit a natural stop point or a provided stop sequence, `length` if the maximum
      # number of tokens specified in the request was reached, or `content_filter` if
      # content was omitted due to a flag from our content filters.
      #
      # @see OpenAI::Models::CompletionChoice#finish_reason
      module FinishReason
        extend OpenAI::Internal::Type::Enum

        STOP = :stop
        LENGTH = :length
        CONTENT_FILTER = :content_filter

        # @!method self.values
        #   @return [Array<Symbol>]
      end

      # @see OpenAI::Models::CompletionChoice#logprobs
      class Logprobs < OpenAI::Internal::Type::BaseModel
        # @!attribute text_offset
        #
        #   @return [Array<Integer>, nil]
        optional :text_offset, OpenAI::Internal::Type::ArrayOf[Integer]

        # @!attribute token_logprobs
        #
        #   @return [Array<Float>, nil]
        optional :token_logprobs, OpenAI::Internal::Type::ArrayOf[Float]

        # @!attribute tokens
        #
        #   @return [Array<String>, nil]
        optional :tokens, OpenAI::Internal::Type::ArrayOf[String]

        # @!attribute top_logprobs
        #
        #   @return [Array<Hash{Symbol=>Float}>, nil]
        optional :top_logprobs, OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::HashOf[Float]]

        # @!method initialize(text_offset: nil, token_logprobs: nil, tokens: nil, top_logprobs: nil)
        #   @param text_offset [Array<Integer>]
        #   @param token_logprobs [Array<Float>]
        #   @param tokens [Array<String>]
        #   @param top_logprobs [Array<Hash{Symbol=>Float}>]
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    # Request parameters for the legacy completions endpoint.
    #
    # @see OpenAI::Resources::Completions#create
    #
    # @see OpenAI::Resources::Completions#create_streaming
    class CompletionCreateParams < OpenAI::Internal::Type::BaseModel
      extend OpenAI::Internal::Type::RequestParameters::Converter
      include OpenAI::Internal::Type::RequestParameters

      # @!attribute model
      #   ID of the model to use. You can use the
      #   [List models](https://platform.openai.com/docs/api-reference/models/list) API to
      #   see all of your available models, or see our
      #   [Model overview](https://platform.openai.com/docs/models) for descriptions of
      #   them.
      #
      #   @return [String, Symbol, OpenAI::Models::CompletionCreateParams::Model]
      required :model, union: -> { OpenAI::CompletionCreateParams::Model }

      # @!attribute prompt
      #   The prompt(s) to generate completions for, encoded as a string, array of
      #   strings, array of tokens, or array of token arrays.
      #
      #   Note that <|endoftext|> is the document separator that the model sees during
      #   training, so if a prompt is not specified the model will generate as if from the
      #   beginning of a new document.
      #
      #   @return [String, Array<String>, Array<Integer>, Array<Array<Integer>>, nil]
      required :prompt, union: -> { OpenAI::CompletionCreateParams::Prompt }, nil?: true

      # @!attribute best_of
      #   Generates `best_of` completions server-side and returns the "best" (the one with
      #   the highest log probability per token). Results cannot be streamed.
      #
      #   When used with `n`, `best_of` controls the number of candidate completions and
      #   `n` specifies how many to return – `best_of` must be greater than `n`.
      #
      #   **Note:** Because this parameter generates many completions, it can quickly
      #   consume your token quota. Use carefully and ensure that you have reasonable
      #   settings for `max_tokens` and `stop`.
      #
      #   @return [Integer, nil]
      optional :best_of, Integer, nil?: true

      # @!attribute echo
      #   Echo back the prompt in addition to the completion
      #
      #   @return [Boolean, nil]
      optional :echo, OpenAI::Internal::Type::Boolean, nil?: true

      # @!attribute frequency_penalty
      #   Number between -2.0 and 2.0. Positive values penalize new tokens based on their
      #   existing frequency in the text so far, decreasing the model's likelihood to
      #   repeat the same line verbatim.
      #
      #   [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
      #
      #   @return [Float, nil]
      optional :frequency_penalty, Float, nil?: true

      # @!attribute logit_bias
      #   Modify the likelihood of specified tokens appearing in the completion.
      #
      #   Accepts a JSON object that maps tokens (specified by their token ID in the GPT
      #   tokenizer) to an associated bias value from -100 to 100. You can use this
      #   [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
      #   Mathematically, the bias is added to the logits generated by the model prior to
      #   sampling. The exact effect will vary per model, but values between -1 and 1
      #   should decrease or increase likelihood of selection; values like -100 or 100
      #   should result in a ban or exclusive selection of the relevant token.
      #
      #   As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
      #   from being generated.
      #
      #   @return [Hash{Symbol=>Integer}, nil]
      optional :logit_bias, OpenAI::Internal::Type::HashOf[Integer], nil?: true

      # @!attribute logprobs
      #   Include the log probabilities on the `logprobs` most likely output tokens, as
      #   well the chosen tokens. For example, if `logprobs` is 5, the API will return a
      #   list of the 5 most likely tokens. The API will always return the `logprob` of
      #   the sampled token, so there may be up to `logprobs+1` elements in the response.
      #
      #   The maximum value for `logprobs` is 5.
      #
      #   @return [Integer, nil]
      optional :logprobs, Integer, nil?: true

      # @!attribute max_tokens
      #   The maximum number of [tokens](/tokenizer) that can be generated in the
      #   completion.
      #
      #   The token count of your prompt plus `max_tokens` cannot exceed the model's
      #   context length.
      #   [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
      #   for counting tokens.
      #
      #   @return [Integer, nil]
      optional :max_tokens, Integer, nil?: true

      # @!attribute n
      #   How many completions to generate for each prompt.
      #
      #   **Note:** Because this parameter generates many completions, it can quickly
      #   consume your token quota. Use carefully and ensure that you have reasonable
      #   settings for `max_tokens` and `stop`.
      #
      #   @return [Integer, nil]
      optional :n, Integer, nil?: true

      # @!attribute presence_penalty
      #   Number between -2.0 and 2.0. Positive values penalize new tokens based on
      #   whether they appear in the text so far, increasing the model's likelihood to
      #   talk about new topics.
      #
      #   [See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
      #
      #   @return [Float, nil]
      optional :presence_penalty, Float, nil?: true

      # @!attribute seed
      #   If specified, our system will make a best effort to sample deterministically,
      #   such that repeated requests with the same `seed` and parameters should return
      #   the same result.
      #
      #   Determinism is not guaranteed, and you should refer to the `system_fingerprint`
      #   response parameter to monitor changes in the backend.
      #
      #   @return [Integer, nil]
      optional :seed, Integer, nil?: true

      # @!attribute stop
      #   Not supported with latest reasoning models `o3` and `o4-mini`.
      #
      #   Up to 4 sequences where the API will stop generating further tokens. The
      #   returned text will not contain the stop sequence.
      #
      #   @return [String, Array<String>, nil]
      optional :stop, union: -> { OpenAI::CompletionCreateParams::Stop }, nil?: true

      # @!attribute stream_options
      #   Options for streaming response. Only set this when you set `stream: true`.
      #
      #   @return [OpenAI::Models::Chat::ChatCompletionStreamOptions, nil]
      optional :stream_options, -> { OpenAI::Chat::ChatCompletionStreamOptions }, nil?: true

      # @!attribute suffix
      #   The suffix that comes after a completion of inserted text.
      #
      #   This parameter is only supported for `gpt-3.5-turbo-instruct`.
      #
      #   @return [String, nil]
      optional :suffix, String, nil?: true

      # @!attribute temperature
      #   What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
      #   make the output more random, while lower values like 0.2 will make it more
      #   focused and deterministic.
      #
      #   We generally recommend altering this or `top_p` but not both.
      #
      #   @return [Float, nil]
      optional :temperature, Float, nil?: true

      # @!attribute top_p
      #   An alternative to sampling with temperature, called nucleus sampling, where the
      #   model considers the results of the tokens with top_p probability mass. So 0.1
      #   means only the tokens comprising the top 10% probability mass are considered.
      #
      #   We generally recommend altering this or `temperature` but not both.
      #
      #   @return [Float, nil]
      optional :top_p, Float, nil?: true

      # @!attribute user
      #   A unique identifier representing your end-user, which can help OpenAI to monitor
      #   and detect abuse.
      #   [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
      #
      #   @return [String, nil]
      optional :user, String

      # @!method initialize(model:, prompt:, best_of: nil, echo: nil, frequency_penalty: nil, logit_bias: nil, logprobs: nil, max_tokens: nil, n: nil, presence_penalty: nil, seed: nil, stop: nil, stream_options: nil, suffix: nil, temperature: nil, top_p: nil, user: nil, request_options: {})
      #   Some parameter documentation has been truncated, see
      #   {OpenAI::Models::CompletionCreateParams} for more details.
      #
      #   @param model [String, Symbol, OpenAI::Models::CompletionCreateParams::Model] ID of the model to use. You can use the [List models](https://platform.openai.co
      #
      #   @param prompt [String, Array<String>, Array<Integer>, Array<Array<Integer>>, nil] The prompt(s) to generate completions for, encoded as a string, array of strings
      #
      #   @param best_of [Integer, nil] Generates `best_of` completions server-side and returns the "best" (the one with
      #
      #   @param echo [Boolean, nil] Echo back the prompt in addition to the completion
      #
      #   @param frequency_penalty [Float, nil] Number between -2.0 and 2.0. Positive values penalize new tokens based on their
      #
      #   @param logit_bias [Hash{Symbol=>Integer}, nil] Modify the likelihood of specified tokens appearing in the completion.
      #
      #   @param logprobs [Integer, nil] Include the log probabilities on the `logprobs` most likely output tokens, as we
      #
      #   @param max_tokens [Integer, nil] The maximum number of [tokens](/tokenizer) that can be generated in the completi
      #
      #   @param n [Integer, nil] How many completions to generate for each prompt.
      #
      #   @param presence_penalty [Float, nil] Number between -2.0 and 2.0. Positive values penalize new tokens based on whethe
      #
      #   @param seed [Integer, nil] If specified, our system will make a best effort to sample deterministically, su
      #
      #   @param stop [String, Array<String>, nil] Not supported with latest reasoning models `o3` and `o4-mini`.
      #
      #   @param stream_options [OpenAI::Models::Chat::ChatCompletionStreamOptions, nil] Options for streaming response. Only set this when you set `stream: true`.
      #
      #   @param suffix [String, nil] The suffix that comes after a completion of inserted text.
      #
      #   @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
      #
      #   @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling, where the
      #
      #   @param user [String] A unique identifier representing your end-user, which can help OpenAI to monitor
      #
      #   @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]

      # ID of the model to use. You can use the
      # [List models](https://platform.openai.com/docs/api-reference/models/list) API to
      # see all of your available models, or see our
      # [Model overview](https://platform.openai.com/docs/models) for descriptions of
      # them.
      module Model
        extend OpenAI::Internal::Type::Union

        variant String

        variant const: -> { OpenAI::Models::CompletionCreateParams::Model::GPT_3_5_TURBO_INSTRUCT }

        variant const: -> { OpenAI::Models::CompletionCreateParams::Model::DAVINCI_002 }

        variant const: -> { OpenAI::Models::CompletionCreateParams::Model::BABBAGE_002 }

        # @!method self.variants
        #   @return [Array(String, Symbol)]

        define_sorbet_constant!(:Variants) do
          T.type_alias { T.any(String, OpenAI::CompletionCreateParams::Model::TaggedSymbol) }
        end

        # @!group

        GPT_3_5_TURBO_INSTRUCT = :"gpt-3.5-turbo-instruct"
        DAVINCI_002 = :"davinci-002"
        BABBAGE_002 = :"babbage-002"

        # @!endgroup
      end

      # The prompt(s) to generate completions for, encoded as a string, array of
      # strings, array of tokens, or array of token arrays.
      #
      # Note that <|endoftext|> is the document separator that the model sees during
      # training, so if a prompt is not specified the model will generate as if from the
      # beginning of a new document.
      module Prompt
        extend OpenAI::Internal::Type::Union

        variant String

        variant -> { OpenAI::Models::CompletionCreateParams::Prompt::StringArray }

        variant -> { OpenAI::Models::CompletionCreateParams::Prompt::IntegerArray }

        variant -> { OpenAI::Models::CompletionCreateParams::Prompt::ArrayOfToken2DArray }

        # @!method self.variants
        #   @return [Array(String, Array<String>, Array<Integer>, Array<Array<Integer>>)]

        # @type [OpenAI::Internal::Type::Converter]
        StringArray = OpenAI::Internal::Type::ArrayOf[String]

        # @type [OpenAI::Internal::Type::Converter]
        IntegerArray = OpenAI::Internal::Type::ArrayOf[Integer]

        # @type [OpenAI::Internal::Type::Converter]
        ArrayOfToken2DArray = OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::ArrayOf[Integer]]
      end

      # Not supported with latest reasoning models `o3` and `o4-mini`.
      #
      # Up to 4 sequences where the API will stop generating further tokens. The
      # returned text will not contain the stop sequence.
      module Stop
        extend OpenAI::Internal::Type::Union

        variant String

        variant -> { OpenAI::Models::CompletionCreateParams::Stop::StringArray }

        # @!method self.variants
        #   @return [Array(String, Array<String>)]

        # @type [OpenAI::Internal::Type::Converter]
        StringArray = OpenAI::Internal::Type::ArrayOf[String]
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    # Usage statistics for a completion request.
    class CompletionUsage < OpenAI::Internal::Type::BaseModel
      # @!attribute completion_tokens
      #   Number of tokens in the generated completion.
      #
      #   @return [Integer]
      required :completion_tokens, Integer

      # @!attribute prompt_tokens
      #   Number of tokens in the prompt.
      #
      #   @return [Integer]
      required :prompt_tokens, Integer

      # @!attribute total_tokens
      #   Total number of tokens used in the request (prompt + completion).
      #
      #   @return [Integer]
      required :total_tokens, Integer

      # @!attribute completion_tokens_details
      #   Breakdown of tokens used in a completion.
      #
      #   @return [OpenAI::Models::CompletionUsage::CompletionTokensDetails, nil]
      optional :completion_tokens_details, -> { OpenAI::CompletionUsage::CompletionTokensDetails }

      # @!attribute prompt_tokens_details
      #   Breakdown of tokens used in the prompt.
      #
      #   @return [OpenAI::Models::CompletionUsage::PromptTokensDetails, nil]
      optional :prompt_tokens_details, -> { OpenAI::CompletionUsage::PromptTokensDetails }

      # @!method initialize(completion_tokens:, prompt_tokens:, total_tokens:, completion_tokens_details: nil, prompt_tokens_details: nil)
      #   Usage statistics for the completion request.
      #
      #   @param completion_tokens [Integer] Number of tokens in the generated completion.
      #
      #   @param prompt_tokens [Integer] Number of tokens in the prompt.
      #
      #   @param total_tokens [Integer] Total number of tokens used in the request (prompt + completion).
      #
      #   @param completion_tokens_details [OpenAI::Models::CompletionUsage::CompletionTokensDetails] Breakdown of tokens used in a completion.
      #
      #   @param prompt_tokens_details [OpenAI::Models::CompletionUsage::PromptTokensDetails] Breakdown of tokens used in the prompt.

      # @see OpenAI::Models::CompletionUsage#completion_tokens_details
      class CompletionTokensDetails < OpenAI::Internal::Type::BaseModel
        # @!attribute accepted_prediction_tokens
        #   When using Predicted Outputs, the number of tokens in the prediction that
        #   appeared in the completion.
        #
        #   @return [Integer, nil]
        optional :accepted_prediction_tokens, Integer

        # @!attribute audio_tokens
        #   Audio input tokens generated by the model.
        #
        #   @return [Integer, nil]
        optional :audio_tokens, Integer

        # @!attribute reasoning_tokens
        #   Tokens generated by the model for reasoning.
        #
        #   @return [Integer, nil]
        optional :reasoning_tokens, Integer

        # @!attribute rejected_prediction_tokens
        #   When using Predicted Outputs, the number of tokens in the prediction that did
        #   not appear in the completion. However, like reasoning tokens, these tokens are
        #   still counted in the total completion tokens for purposes of billing, output,
        #   and context window limits.
        #
        #   @return [Integer, nil]
        optional :rejected_prediction_tokens, Integer

        # @!method initialize(accepted_prediction_tokens: nil, audio_tokens: nil, reasoning_tokens: nil, rejected_prediction_tokens: nil)
        #   Some parameter documentation has been truncated, see
        #   {OpenAI::Models::CompletionUsage::CompletionTokensDetails} for more details.
        #
        #   Breakdown of tokens used in a completion.
        #
        #   @param accepted_prediction_tokens [Integer] When using Predicted Outputs, the number of tokens in the
        #
        #   @param audio_tokens [Integer] Audio input tokens generated by the model.
        #
        #   @param reasoning_tokens [Integer] Tokens generated by the model for reasoning.
        #
        #   @param rejected_prediction_tokens [Integer] When using Predicted Outputs, the number of tokens in the
      end

      # @see OpenAI::Models::CompletionUsage#prompt_tokens_details
      class PromptTokensDetails < OpenAI::Internal::Type::BaseModel
        # @!attribute audio_tokens
        #   Audio input tokens present in the prompt.
        #
        #   @return [Integer, nil]
        optional :audio_tokens, Integer

        # @!attribute cached_tokens
        #   Cached tokens present in the prompt.
        #
        #   @return [Integer, nil]
        optional :cached_tokens, Integer

        # @!method initialize(audio_tokens: nil, cached_tokens: nil)
        #   Breakdown of tokens used in the prompt.
        #
        #   @param audio_tokens [Integer] Audio input tokens present in the prompt.
        #
        #   @param cached_tokens [Integer] Cached tokens present in the prompt.
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    # Combine multiple filters using `and` or `or`.
    class CompoundFilter < OpenAI::Internal::Type::BaseModel
      # @!attribute filters
      #   Array of filters to combine. Items can be `ComparisonFilter` or
      #   `CompoundFilter`.
      #
      #   @return [Array<OpenAI::Models::ComparisonFilter, Object>]
      required :filters, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::CompoundFilter::Filter] }

      # @!attribute type
      #   Type of operation: `and` or `or`.
      #
      #   @return [Symbol, OpenAI::Models::CompoundFilter::Type]
      required :type, enum: -> { OpenAI::CompoundFilter::Type }

      # @!method initialize(filters:, type:)
      #   Some parameter documentation has been truncated, see
      #   {OpenAI::Models::CompoundFilter} for more details.
      #
      #   Combine multiple filters using `and` or `or`.
      #
      #   @param filters [Array<OpenAI::Models::ComparisonFilter, Object>] Array of filters to combine. Items can be `ComparisonFilter` or `CompoundFilter`
      #
      #   @param type [Symbol, OpenAI::Models::CompoundFilter::Type] Type of operation: `and` or `or`.

      # A filter used to compare a specified attribute key to a given value using a
      # defined comparison operation.
      module Filter
        extend OpenAI::Internal::Type::Union

        # A filter used to compare a specified attribute key to a given value using a defined comparison operation.
        variant -> { OpenAI::ComparisonFilter }

        # Fallback variant: accepts an arbitrary (e.g. nested compound) filter object.
        variant OpenAI::Internal::Type::Unknown

        # @!method self.variants
        #   @return [Array(OpenAI::Models::ComparisonFilter, Object)]
      end

      # Type of operation: `and` or `or`.
      #
      # @see OpenAI::Models::CompoundFilter#type
      module Type
        extend OpenAI::Internal::Type::Enum

        AND = :and
        OR = :or

        # @!method self.values
        #   @return [Array<Symbol>]
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    # Request parameters for creating a container.
    #
    # @see OpenAI::Resources::Containers#create
    class ContainerCreateParams < OpenAI::Internal::Type::BaseModel
      extend OpenAI::Internal::Type::RequestParameters::Converter
      include OpenAI::Internal::Type::RequestParameters

      # @!attribute name
      #   Name of the container to create.
      #
      #   @return [String]
      required :name, String

      # @!attribute expires_after
      #   Container expiration time in seconds relative to the 'anchor' time.
      #
      #   @return [OpenAI::Models::ContainerCreateParams::ExpiresAfter, nil]
      optional :expires_after, -> { OpenAI::ContainerCreateParams::ExpiresAfter }

      # @!attribute file_ids
      #   IDs of files to copy to the container.
      #
      #   @return [Array<String>, nil]
      optional :file_ids, OpenAI::Internal::Type::ArrayOf[String]

      # @!method initialize(name:, expires_after: nil, file_ids: nil, request_options: {})
      #   @param name [String] Name of the container to create.
      #
      #   @param expires_after [OpenAI::Models::ContainerCreateParams::ExpiresAfter] Container expiration time in seconds relative to the 'anchor' time.
      #
      #   @param file_ids [Array<String>] IDs of files to copy to the container.
      #
      #   @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]

      class ExpiresAfter < OpenAI::Internal::Type::BaseModel
        # @!attribute anchor
        #   Time anchor for the expiration time. Currently only 'last_active_at' is
        #   supported.
        #
        #   @return [Symbol, OpenAI::Models::ContainerCreateParams::ExpiresAfter::Anchor]
        required :anchor, enum: -> { OpenAI::ContainerCreateParams::ExpiresAfter::Anchor }

        # @!attribute minutes
        #
        #   @return [Integer]
        required :minutes, Integer

        # @!method initialize(anchor:, minutes:)
        #   Some parameter documentation has been truncated, see
        #   {OpenAI::Models::ContainerCreateParams::ExpiresAfter} for more details.
        #
        #   Container expiration time in seconds relative to the 'anchor' time.
        #
        #   @param anchor [Symbol, OpenAI::Models::ContainerCreateParams::ExpiresAfter::Anchor] Time anchor for the expiration time. Currently only 'last_active_at' is supporte
        #
        #   @param minutes [Integer]

        # Time anchor for the expiration time. Currently only 'last_active_at' is
        # supported.
        #
        # @see OpenAI::Models::ContainerCreateParams::ExpiresAfter#anchor
        module Anchor
          extend OpenAI::Internal::Type::Enum

          LAST_ACTIVE_AT = :last_active_at

          # @!method self.values
          #   @return [Array<Symbol>]
        end
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    # Response payload returned when a container is created.
    #
    # @see OpenAI::Resources::Containers#create
    class ContainerCreateResponse < OpenAI::Internal::Type::BaseModel
      # @!attribute id
      #   Unique identifier for the container.
      #
      #   @return [String]
      required :id, String

      # @!attribute created_at
      #   Unix timestamp (in seconds) when the container was created.
      #
      #   @return [Integer]
      required :created_at, Integer

      # @!attribute name
      #   Name of the container.
      #
      #   @return [String]
      required :name, String

      # @!attribute object
      #   The type of this object.
      #
      #   @return [String]
      required :object, String

      # @!attribute status
      #   Status of the container (e.g., active, deleted).
      #
      #   @return [String]
      required :status, String

      # @!attribute expires_after
      #   The container will expire after this time period. The anchor is the reference
      #   point for the expiration. The minutes is the number of minutes after the anchor
      #   before the container expires.
      #
      #   @return [OpenAI::Models::ContainerCreateResponse::ExpiresAfter, nil]
      optional :expires_after, -> { OpenAI::Models::ContainerCreateResponse::ExpiresAfter }

      # @!method initialize(id:, created_at:, name:, object:, status:, expires_after: nil)
      #   Some parameter documentation has been truncated, see
      #   {OpenAI::Models::ContainerCreateResponse} for more details.
      #
      #   @param id [String] Unique identifier for the container.
      #
      #   @param created_at [Integer] Unix timestamp (in seconds) when the container was created.
      #
      #   @param name [String] Name of the container.
      #
      #   @param object [String] The type of this object.
      #
      #   @param status [String] Status of the container (e.g., active, deleted).
      #
      #   @param expires_after [OpenAI::Models::ContainerCreateResponse::ExpiresAfter] The container will expire after this time period.

      # @see OpenAI::Models::ContainerCreateResponse#expires_after
      class ExpiresAfter < OpenAI::Internal::Type::BaseModel
        # @!attribute anchor
        #   The reference point for the expiration.
        #
        #   @return [Symbol, OpenAI::Models::ContainerCreateResponse::ExpiresAfter::Anchor, nil]
        optional :anchor, enum: -> { OpenAI::Models::ContainerCreateResponse::ExpiresAfter::Anchor }

        # @!attribute minutes
        #   The number of minutes after the anchor before the container expires.
        #
        #   @return [Integer, nil]
        optional :minutes, Integer

        # @!method initialize(anchor: nil, minutes: nil)
        #   The container will expire after this time period. The anchor is the reference
        #   point for the expiration. The minutes is the number of minutes after the anchor
        #   before the container expires.
        #
        #   @param anchor [Symbol, OpenAI::Models::ContainerCreateResponse::ExpiresAfter::Anchor] The reference point for the expiration.
        #
        #   @param minutes [Integer] The number of minutes after the anchor before the container expires.

        # The reference point for the expiration.
        #
        # @see OpenAI::Models::ContainerCreateResponse::ExpiresAfter#anchor
        module Anchor
          extend OpenAI::Internal::Type::Enum

          LAST_ACTIVE_AT = :last_active_at

          # @!method self.values
          #   @return [Array<Symbol>]
        end
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    # Request parameters for deleting a container. Carries no endpoint-specific
    # fields — only the shared request options.
    #
    # @see OpenAI::Resources::Containers#delete
    class ContainerDeleteParams < OpenAI::Internal::Type::BaseModel
      extend OpenAI::Internal::Type::RequestParameters::Converter
      include OpenAI::Internal::Type::RequestParameters

      # @!method initialize(request_options: {})
      #   @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    # Request parameters for listing containers (cursor pagination via `after`).
    #
    # @see OpenAI::Resources::Containers#list
    class ContainerListParams < OpenAI::Internal::Type::BaseModel
      extend OpenAI::Internal::Type::RequestParameters::Converter
      include OpenAI::Internal::Type::RequestParameters

      # @!attribute after
      #   A cursor for use in pagination. `after` is an object ID that defines your place
      #   in the list. For instance, if you make a list request and receive 100 objects,
      #   ending with obj_foo, your subsequent call can include after=obj_foo in order to
      #   fetch the next page of the list.
      #
      #   @return [String, nil]
      optional :after, String

      # @!attribute limit
      #   A limit on the number of objects to be returned. Limit can range between 1 and
      #   100, and the default is 20.
      #
      #   @return [Integer, nil]
      optional :limit, Integer

      # @!attribute order
      #   Sort order by the `created_at` timestamp of the objects. `asc` for ascending
      #   order and `desc` for descending order.
      #
      #   @return [Symbol, OpenAI::Models::ContainerListParams::Order, nil]
      optional :order, enum: -> { OpenAI::ContainerListParams::Order }

      # @!method initialize(after: nil, limit: nil, order: nil, request_options: {})
      #   Some parameter documentation has been truncated, see
      #   {OpenAI::Models::ContainerListParams} for more details.
      #
      #   @param after [String] A cursor for use in pagination. `after` is an object ID that defines your place
      #
      #   @param limit [Integer] A limit on the number of objects to be returned. Limit can range between 1 and 1
      #
      #   @param order [Symbol, OpenAI::Models::ContainerListParams::Order] Sort order by the `created_at` timestamp of the objects. `asc` for ascending ord
      #
      #   @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]

      # Sort order by the `created_at` timestamp of the objects. `asc` for ascending
      # order and `desc` for descending order.
      module Order
        extend OpenAI::Internal::Type::Enum

        ASC = :asc
        DESC = :desc

        # @!method self.values
        #   @return [Array<Symbol>]
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    # A single container item as returned by the list endpoint.
    #
    # @see OpenAI::Resources::Containers#list
    class ContainerListResponse < OpenAI::Internal::Type::BaseModel
      # @!attribute id
      #   Unique identifier for the container.
      #
      #   @return [String]
      required :id, String

      # @!attribute created_at
      #   Unix timestamp (in seconds) when the container was created.
      #
      #   @return [Integer]
      required :created_at, Integer

      # @!attribute name
      #   Name of the container.
      #
      #   @return [String]
      required :name, String

      # @!attribute object
      #   The type of this object.
      #
      #   @return [String]
      required :object, String

      # @!attribute status
      #   Status of the container (e.g., active, deleted).
      #
      #   @return [String]
      required :status, String

      # @!attribute expires_after
      #   The container will expire after this time period. The anchor is the reference
      #   point for the expiration. The minutes is the number of minutes after the anchor
      #   before the container expires.
      #
      #   @return [OpenAI::Models::ContainerListResponse::ExpiresAfter, nil]
      optional :expires_after, -> { OpenAI::Models::ContainerListResponse::ExpiresAfter }

      # @!method initialize(id:, created_at:, name:, object:, status:, expires_after: nil)
      #   Some parameter documentation has been truncated, see
      #   {OpenAI::Models::ContainerListResponse} for more details.
      #
      #   @param id [String] Unique identifier for the container.
      #
      #   @param created_at [Integer] Unix timestamp (in seconds) when the container was created.
      #
      #   @param name [String] Name of the container.
      #
      #   @param object [String] The type of this object.
      #
      #   @param status [String] Status of the container (e.g., active, deleted).
      #
      #   @param expires_after [OpenAI::Models::ContainerListResponse::ExpiresAfter] The container will expire after this time period.

      # @see OpenAI::Models::ContainerListResponse#expires_after
      class ExpiresAfter < OpenAI::Internal::Type::BaseModel
        # @!attribute anchor
        #   The reference point for the expiration.
        #
        #   @return [Symbol, OpenAI::Models::ContainerListResponse::ExpiresAfter::Anchor, nil]
        optional :anchor, enum: -> { OpenAI::Models::ContainerListResponse::ExpiresAfter::Anchor }

        # @!attribute minutes
        #   The number of minutes after the anchor before the container expires.
        #
        #   @return [Integer, nil]
        optional :minutes, Integer

        # @!method initialize(anchor: nil, minutes: nil)
        #   The container will expire after this time period. The anchor is the reference
        #   point for the expiration. The minutes is the number of minutes after the anchor
        #   before the container expires.
        #
        #   @param anchor [Symbol, OpenAI::Models::ContainerListResponse::ExpiresAfter::Anchor] The reference point for the expiration.
        #
        #   @param minutes [Integer] The number of minutes after the anchor before the container expires.

        # The reference point for the expiration.
        #
        # @see OpenAI::Models::ContainerListResponse::ExpiresAfter#anchor
        module Anchor
          extend OpenAI::Internal::Type::Enum

          LAST_ACTIVE_AT = :last_active_at

          # @!method self.values
          #   @return [Array<Symbol>]
        end
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    # Request parameters for retrieving a single container. The container ID itself
    # is passed as a path argument to the resource method, so only request options
    # are carried here.
    #
    # @see OpenAI::Resources::Containers#retrieve
    class ContainerRetrieveParams < OpenAI::Internal::Type::BaseModel
      extend OpenAI::Internal::Type::RequestParameters::Converter
      include OpenAI::Internal::Type::RequestParameters

      # @!method initialize(request_options: {})
      #   @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    # Response model for a single container returned by the retrieve endpoint.
    #
    # @see OpenAI::Resources::Containers#retrieve
    class ContainerRetrieveResponse < OpenAI::Internal::Type::BaseModel
      # @!attribute id
      #   Unique identifier for the container.
      #
      #   @return [String]
      required :id, String

      # @!attribute created_at
      #   Unix timestamp (in seconds) when the container was created.
      #
      #   @return [Integer]
      required :created_at, Integer

      # @!attribute name
      #   Name of the container.
      #
      #   @return [String]
      required :name, String

      # @!attribute object
      #   The type of this object.
      #
      #   @return [String]
      required :object, String

      # @!attribute status
      #   Status of the container (e.g., active, deleted).
      #
      #   @return [String]
      required :status, String

      # @!attribute expires_after
      #   The container will expire after this time period. The anchor is the reference
      #   point for the expiration. The minutes is the number of minutes after the anchor
      #   before the container expires.
      #
      #   @return [OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter, nil]
      optional :expires_after, -> { OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter }

      # @!method initialize(id:, created_at:, name:, object:, status:, expires_after: nil)
      #   Some parameter documentation has been truncated, see
      #   {OpenAI::Models::ContainerRetrieveResponse} for more details.
      #
      #   @param id [String] Unique identifier for the container.
      #
      #   @param created_at [Integer] Unix timestamp (in seconds) when the container was created.
      #
      #   @param name [String] Name of the container.
      #
      #   @param object [String] The type of this object.
      #
      #   @param status [String] Status of the container (e.g., active, deleted).
      #
      #   @param expires_after [OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter] The container will expire after this time period.

      # @see OpenAI::Models::ContainerRetrieveResponse#expires_after
      class ExpiresAfter < OpenAI::Internal::Type::BaseModel
        # @!attribute anchor
        #   The reference point for the expiration.
        #
        #   @return [Symbol, OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter::Anchor, nil]
        optional :anchor, enum: -> { OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter::Anchor }

        # @!attribute minutes
        #   The number of minutes after the anchor before the container expires.
        #
        #   @return [Integer, nil]
        optional :minutes, Integer

        # @!method initialize(anchor: nil, minutes: nil)
        #   The container will expire after this time period. The anchor is the reference
        #   point for the expiration. The minutes is the number of minutes after the anchor
        #   before the container expires.
        #
        #   @param anchor [Symbol, OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter::Anchor] The reference point for the expiration.
        #
        #   @param minutes [Integer] The number of minutes after the anchor before the container expires.

        # The reference point for the expiration.
        #
        # @see OpenAI::Models::ContainerRetrieveResponse::ExpiresAfter#anchor
        module Anchor
          extend OpenAI::Internal::Type::Enum

          LAST_ACTIVE_AT = :last_active_at

          # @!method self.values
          #   @return [Array<Symbol>]
        end
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Containers
      # Request parameters for creating a file inside a container. Either an
      # uploadable `file` or an existing `file_id` may be supplied.
      #
      # @see OpenAI::Resources::Containers::Files#create
      class FileCreateParams < OpenAI::Internal::Type::BaseModel
        extend OpenAI::Internal::Type::RequestParameters::Converter
        include OpenAI::Internal::Type::RequestParameters

        # @!attribute file
        #   The File object (not file name) to be uploaded.
        #
        #   @return [Pathname, StringIO, IO, String, OpenAI::FilePart, nil]
        optional :file, OpenAI::Internal::Type::FileInput

        # @!attribute file_id
        #   Name of the file to create.
        #
        #   @return [String, nil]
        optional :file_id, String

        # @!method initialize(file: nil, file_id: nil, request_options: {})
        #   Some parameter documentation has been truncated, see
        #   {OpenAI::Models::Containers::FileCreateParams} for more details.
        #
        #   @param file [Pathname, StringIO, IO, String, OpenAI::FilePart] The File object (not file name) to be uploaded.
        #
        #   @param file_id [String] Name of the file to create.
        #
        #   @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Containers
      # Response model for a container file created via the files endpoint.
      #
      # @see OpenAI::Resources::Containers::Files#create
      class FileCreateResponse < OpenAI::Internal::Type::BaseModel
        # @!attribute id
        #   Unique identifier for the file.
        #
        #   @return [String]
        required :id, String

        # @!attribute bytes
        #   Size of the file in bytes.
        #
        #   @return [Integer]
        required :bytes, Integer

        # @!attribute container_id
        #   The container this file belongs to.
        #
        #   @return [String]
        required :container_id, String

        # @!attribute created_at
        #   Unix timestamp (in seconds) when the file was created.
        #
        #   @return [Integer]
        required :created_at, Integer

        # @!attribute object
        #   The type of this object (`container.file`).
        #
        #   @return [Symbol, :"container.file"]
        required :object, const: :"container.file"

        # @!attribute path
        #   Path of the file in the container.
        #
        #   @return [String]
        required :path, String

        # @!attribute source
        #   Source of the file (e.g., `user`, `assistant`).
        #
        #   @return [String]
        required :source, String

        # @!method initialize(id:, bytes:, container_id:, created_at:, path:, source:, object: :"container.file")
        #   @param id [String] Unique identifier for the file.
        #
        #   @param bytes [Integer] Size of the file in bytes.
        #
        #   @param container_id [String] The container this file belongs to.
        #
        #   @param created_at [Integer] Unix timestamp (in seconds) when the file was created.
        #
        #   @param path [String] Path of the file in the container.
        #
        #   @param source [String] Source of the file (e.g., `user`, `assistant`).
        #
        #   @param object [Symbol, :"container.file"] The type of this object (`container.file`).
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Containers
      # Request parameters for deleting a file from a container.
      #
      # @see OpenAI::Resources::Containers::Files#delete
      class FileDeleteParams < OpenAI::Internal::Type::BaseModel
        extend OpenAI::Internal::Type::RequestParameters::Converter
        include OpenAI::Internal::Type::RequestParameters

        # @!attribute container_id
        #   NOTE(review): undocumented upstream — presumably the ID of the container
        #   that owns the file being deleted; confirm against the resource method.
        #
        #   @return [String]
        required :container_id, String

        # @!method initialize(container_id:, request_options: {})
        #   @param container_id [String]
        #   @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Containers
      # Request parameters for listing files in a container, with cursor-based
      # pagination (`after`/`limit`) and sort `order`.
      #
      # @see OpenAI::Resources::Containers::Files#list
      class FileListParams < OpenAI::Internal::Type::BaseModel
        extend OpenAI::Internal::Type::RequestParameters::Converter
        include OpenAI::Internal::Type::RequestParameters

        # @!attribute after
        #   A cursor for use in pagination. `after` is an object ID that defines your place
        #   in the list. For instance, if you make a list request and receive 100 objects,
        #   ending with obj_foo, your subsequent call can include after=obj_foo in order to
        #   fetch the next page of the list.
        #
        #   @return [String, nil]
        optional :after, String

        # @!attribute limit
        #   A limit on the number of objects to be returned. Limit can range between 1 and
        #   100, and the default is 20.
        #
        #   @return [Integer, nil]
        optional :limit, Integer

        # @!attribute order
        #   Sort order by the `created_at` timestamp of the objects. `asc` for ascending
        #   order and `desc` for descending order.
        #
        #   @return [Symbol, OpenAI::Models::Containers::FileListParams::Order, nil]
        optional :order, enum: -> { OpenAI::Containers::FileListParams::Order }

        # @!method initialize(after: nil, limit: nil, order: nil, request_options: {})
        #   Some parameter documentation has been truncated, see
        #   {OpenAI::Models::Containers::FileListParams} for more details.
        #
        #   @param after [String] A cursor for use in pagination. `after` is an object ID that defines your place
        #
        #   @param limit [Integer] A limit on the number of objects to be returned. Limit can range between 1 and 1
        #
        #   @param order [Symbol, OpenAI::Models::Containers::FileListParams::Order] Sort order by the `created_at` timestamp of the objects. `asc` for ascending ord
        #
        #   @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]

        # Sort order by the `created_at` timestamp of the objects. `asc` for ascending
        # order and `desc` for descending order.
        module Order
          extend OpenAI::Internal::Type::Enum

          ASC = :asc
          DESC = :desc

          # @!method self.values
          #   @return [Array<Symbol>]
        end
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Containers
      # Response item for a container file returned by the list endpoint.
      #
      # @see OpenAI::Resources::Containers::Files#list
      class FileListResponse < OpenAI::Internal::Type::BaseModel
        # @!attribute id
        #   Unique identifier for the file.
        #
        #   @return [String]
        required :id, String

        # @!attribute bytes
        #   Size of the file in bytes.
        #
        #   @return [Integer]
        required :bytes, Integer

        # @!attribute container_id
        #   The container this file belongs to.
        #
        #   @return [String]
        required :container_id, String

        # @!attribute created_at
        #   Unix timestamp (in seconds) when the file was created.
        #
        #   @return [Integer]
        required :created_at, Integer

        # @!attribute object
        #   The type of this object (`container.file`).
        #
        #   @return [Symbol, :"container.file"]
        required :object, const: :"container.file"

        # @!attribute path
        #   Path of the file in the container.
        #
        #   @return [String]
        required :path, String

        # @!attribute source
        #   Source of the file (e.g., `user`, `assistant`).
        #
        #   @return [String]
        required :source, String

        # @!method initialize(id:, bytes:, container_id:, created_at:, path:, source:, object: :"container.file")
        #   @param id [String] Unique identifier for the file.
        #
        #   @param bytes [Integer] Size of the file in bytes.
        #
        #   @param container_id [String] The container this file belongs to.
        #
        #   @param created_at [Integer] Unix timestamp (in seconds) when the file was created.
        #
        #   @param path [String] Path of the file in the container.
        #
        #   @param source [String] Source of the file (e.g., `user`, `assistant`).
        #
        #   @param object [Symbol, :"container.file"] The type of this object (`container.file`).
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Containers
      # Request parameters for retrieving a single file from a container.
      #
      # @see OpenAI::Resources::Containers::Files#retrieve
      class FileRetrieveParams < OpenAI::Internal::Type::BaseModel
        extend OpenAI::Internal::Type::RequestParameters::Converter
        include OpenAI::Internal::Type::RequestParameters

        # @!attribute container_id
        #   NOTE(review): undocumented upstream — presumably the ID of the container
        #   that owns the file being retrieved; confirm against the resource method.
        #
        #   @return [String]
        required :container_id, String

        # @!method initialize(container_id:, request_options: {})
        #   @param container_id [String]
        #   @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Containers
      # Response model for a container file returned by the retrieve endpoint.
      #
      # @see OpenAI::Resources::Containers::Files#retrieve
      class FileRetrieveResponse < OpenAI::Internal::Type::BaseModel
        # @!attribute id
        #   Unique identifier for the file.
        #
        #   @return [String]
        required :id, String

        # @!attribute bytes
        #   Size of the file in bytes.
        #
        #   @return [Integer]
        required :bytes, Integer

        # @!attribute container_id
        #   The container this file belongs to.
        #
        #   @return [String]
        required :container_id, String

        # @!attribute created_at
        #   Unix timestamp (in seconds) when the file was created.
        #
        #   @return [Integer]
        required :created_at, Integer

        # @!attribute object
        #   The type of this object (`container.file`).
        #
        #   @return [Symbol, :"container.file"]
        required :object, const: :"container.file"

        # @!attribute path
        #   Path of the file in the container.
        #
        #   @return [String]
        required :path, String

        # @!attribute source
        #   Source of the file (e.g., `user`, `assistant`).
        #
        #   @return [String]
        required :source, String

        # @!method initialize(id:, bytes:, container_id:, created_at:, path:, source:, object: :"container.file")
        #   @param id [String] Unique identifier for the file.
        #
        #   @param bytes [Integer] Size of the file in bytes.
        #
        #   @param container_id [String] The container this file belongs to.
        #
        #   @param created_at [Integer] Unix timestamp (in seconds) when the file was created.
        #
        #   @param path [String] Path of the file in the container.
        #
        #   @param source [String] Source of the file (e.g., `user`, `assistant`).
        #
        #   @param object [Symbol, :"container.file"] The type of this object (`container.file`).
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Containers
      module Files
        # Request parameters for retrieving the raw content of a container file.
        #
        # @see OpenAI::Resources::Containers::Files::Content#retrieve
        class ContentRetrieveParams < OpenAI::Internal::Type::BaseModel
          extend OpenAI::Internal::Type::RequestParameters::Converter
          include OpenAI::Internal::Type::RequestParameters

          # @!attribute container_id
          #   NOTE(review): undocumented upstream — presumably the ID of the container
          #   that owns the file whose content is fetched; confirm against the resource.
          #
          #   @return [String]
          required :container_id, String

          # @!method initialize(container_id:, request_options: {})
          #   @param container_id [String]
          #   @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
        end
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    # Response model for the embeddings endpoint: the generated vectors plus
    # token-usage accounting.
    #
    # @see OpenAI::Resources::Embeddings#create
    class CreateEmbeddingResponse < OpenAI::Internal::Type::BaseModel
      # @!attribute data
      #   The list of embeddings generated by the model.
      #
      #   @return [Array<OpenAI::Models::Embedding>]
      required :data, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Embedding] }

      # @!attribute model
      #   The name of the model used to generate the embedding.
      #
      #   @return [String]
      required :model, String

      # @!attribute object
      #   The object type, which is always "list".
      #
      #   @return [Symbol, :list]
      required :object, const: :list

      # @!attribute usage
      #   The usage information for the request.
      #
      #   @return [OpenAI::Models::CreateEmbeddingResponse::Usage]
      required :usage, -> { OpenAI::CreateEmbeddingResponse::Usage }

      # @!method initialize(data:, model:, usage:, object: :list)
      #   @param data [Array<OpenAI::Models::Embedding>] The list of embeddings generated by the model.
      #
      #   @param model [String] The name of the model used to generate the embedding.
      #
      #   @param usage [OpenAI::Models::CreateEmbeddingResponse::Usage] The usage information for the request.
      #
      #   @param object [Symbol, :list] The object type, which is always "list".

      # @see OpenAI::Models::CreateEmbeddingResponse#usage
      class Usage < OpenAI::Internal::Type::BaseModel
        # @!attribute prompt_tokens
        #   The number of tokens used by the prompt.
        #
        #   @return [Integer]
        required :prompt_tokens, Integer

        # @!attribute total_tokens
        #   The total number of tokens used by the request.
        #
        #   @return [Integer]
        required :total_tokens, Integer

        # @!method initialize(prompt_tokens:, total_tokens:)
        #   The usage information for the request.
        #
        #   @param prompt_tokens [Integer] The number of tokens used by the prompt.
        #
        #   @param total_tokens [Integer] The total number of tokens used by the request.
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    # Represents an embedding vector returned by the embedding endpoint.
    class Embedding < OpenAI::Internal::Type::BaseModel
      # @!attribute embedding
      #   The embedding vector, which is a list of floats. The length of vector depends on
      #   the model as listed in the
      #   [embedding guide](https://platform.openai.com/docs/guides/embeddings).
      #
      #   @return [Array<Float>]
      required :embedding, OpenAI::Internal::Type::ArrayOf[Float]

      # @!attribute index
      #   The index of the embedding in the list of embeddings.
      #
      #   @return [Integer]
      required :index, Integer

      # @!attribute object
      #   The object type, which is always "embedding".
      #
      #   @return [Symbol, :embedding]
      required :object, const: :embedding

      # @!method initialize(embedding:, index:, object: :embedding)
      #   Some parameter documentation has been truncated, see
      #   {OpenAI::Models::Embedding} for more details.
      #
      #   Represents an embedding vector returned by embedding endpoint.
      #
      #   @param embedding [Array<Float>] The embedding vector, which is a list of floats. The length of vector depends on
      #
      #   @param index [Integer] The index of the embedding in the list of embeddings.
      #
      #   @param object [Symbol, :embedding] The object type, which is always "embedding".
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    # Request parameters for creating embeddings: the input text/tokens, the model,
    # and optional output controls (`dimensions`, `encoding_format`, `user`).
    #
    # @see OpenAI::Resources::Embeddings#create
    class EmbeddingCreateParams < OpenAI::Internal::Type::BaseModel
      extend OpenAI::Internal::Type::RequestParameters::Converter
      include OpenAI::Internal::Type::RequestParameters

      # @!attribute input
      #   Input text to embed, encoded as a string or array of tokens. To embed multiple
      #   inputs in a single request, pass an array of strings or array of token arrays.
      #   The input must not exceed the max input tokens for the model (8192 tokens for
      #   all embedding models), cannot be an empty string, and any array must be 2048
      #   dimensions or less.
      #   [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
      #   for counting tokens. In addition to the per-input token limit, all embedding
      #   models enforce a maximum of 300,000 tokens summed across all inputs in a single
      #   request.
      #
      #   @return [String, Array<String>, Array<Integer>, Array<Array<Integer>>]
      required :input, union: -> { OpenAI::EmbeddingCreateParams::Input }

      # @!attribute model
      #   ID of the model to use. You can use the
      #   [List models](https://platform.openai.com/docs/api-reference/models/list) API to
      #   see all of your available models, or see our
      #   [Model overview](https://platform.openai.com/docs/models) for descriptions of
      #   them.
      #
      #   @return [String, Symbol, OpenAI::Models::EmbeddingModel]
      required :model, union: -> { OpenAI::EmbeddingCreateParams::Model }

      # @!attribute dimensions
      #   The number of dimensions the resulting output embeddings should have. Only
      #   supported in `text-embedding-3` and later models.
      #
      #   @return [Integer, nil]
      optional :dimensions, Integer

      # @!attribute encoding_format
      #   The format to return the embeddings in. Can be either `float` or
      #   [`base64`](https://pypi.org/project/pybase64/).
      #
      #   @return [Symbol, OpenAI::Models::EmbeddingCreateParams::EncodingFormat, nil]
      optional :encoding_format, enum: -> { OpenAI::EmbeddingCreateParams::EncodingFormat }

      # @!attribute user
      #   A unique identifier representing your end-user, which can help OpenAI to monitor
      #   and detect abuse.
      #   [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
      #
      #   @return [String, nil]
      optional :user, String

      # @!method initialize(input:, model:, dimensions: nil, encoding_format: nil, user: nil, request_options: {})
      #   Some parameter documentation has been truncated, see
      #   {OpenAI::Models::EmbeddingCreateParams} for more details.
      #
      #   @param input [String, Array<String>, Array<Integer>, Array<Array<Integer>>] Input text to embed, encoded as a string or array of tokens. To embed multiple i
      #
      #   @param model [String, Symbol, OpenAI::Models::EmbeddingModel] ID of the model to use. You can use the [List models](https://platform.openai.co
      #
      #   @param dimensions [Integer] The number of dimensions the resulting output embeddings should have. Only suppo
      #
      #   @param encoding_format [Symbol, OpenAI::Models::EmbeddingCreateParams::EncodingFormat] The format to return the embeddings in. Can be either `float` or [`base64`](http
      #
      #   @param user [String] A unique identifier representing your end-user, which can help OpenAI to monitor
      #
      #   @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]

      # Input text to embed, encoded as a string or array of tokens. To embed multiple
      # inputs in a single request, pass an array of strings or array of token arrays.
      # The input must not exceed the max input tokens for the model (8192 tokens for
      # all embedding models), cannot be an empty string, and any array must be 2048
      # dimensions or less.
      # [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
      # for counting tokens. In addition to the per-input token limit, all embedding
      # models enforce a maximum of 300,000 tokens summed across all inputs in a single
      # request.
      module Input
        extend OpenAI::Internal::Type::Union

        # The string that will be turned into an embedding.
        variant String

        # The array of strings that will be turned into an embedding.
        variant -> { OpenAI::Models::EmbeddingCreateParams::Input::StringArray }

        # The array of integers that will be turned into an embedding.
        variant -> { OpenAI::Models::EmbeddingCreateParams::Input::IntegerArray }

        # The array of arrays containing integers that will be turned into an embedding.
        variant -> { OpenAI::Models::EmbeddingCreateParams::Input::ArrayOfToken2DArray }

        # @!method self.variants
        #   @return [Array(String, Array<String>, Array<Integer>, Array<Array<Integer>>)]

        # @type [OpenAI::Internal::Type::Converter]
        StringArray = OpenAI::Internal::Type::ArrayOf[String]

        # @type [OpenAI::Internal::Type::Converter]
        IntegerArray = OpenAI::Internal::Type::ArrayOf[Integer]

        # @type [OpenAI::Internal::Type::Converter]
        ArrayOfToken2DArray = OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::ArrayOf[Integer]]
      end

      # ID of the model to use. You can use the
      # [List models](https://platform.openai.com/docs/api-reference/models/list) API to
      # see all of your available models, or see our
      # [Model overview](https://platform.openai.com/docs/models) for descriptions of
      # them.
      module Model
        extend OpenAI::Internal::Type::Union

        variant String

        # ID of the model to use. You can use the [List models](https://platform.openai.com/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](https://platform.openai.com/docs/models) for descriptions of them.
        variant enum: -> { OpenAI::EmbeddingModel }

        # @!method self.variants
        #   @return [Array(String, Symbol, OpenAI::Models::EmbeddingModel)]
      end

      # The format to return the embeddings in. Can be either `float` or
      # [`base64`](https://pypi.org/project/pybase64/).
      module EncodingFormat
        extend OpenAI::Internal::Type::Enum

        FLOAT = :float
        BASE64 = :base64

        # @!method self.values
        #   @return [Array<Symbol>]
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    # Known embedding model identifiers accepted by the embeddings endpoint.
    module EmbeddingModel
      extend OpenAI::Internal::Type::Enum

      TEXT_EMBEDDING_ADA_002 = :"text-embedding-ada-002"
      TEXT_EMBEDDING_3_SMALL = :"text-embedding-3-small"
      TEXT_EMBEDDING_3_LARGE = :"text-embedding-3-large"

      # @!method self.values
      #   @return [Array<Symbol>]
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    # Standard error payload returned by the API: machine-readable `code` and
    # `type`, human-readable `message`, and the offending `param` (when any).
    class ErrorObject < OpenAI::Internal::Type::BaseModel
      # @!attribute code
      #   Machine-readable error code; may be null.
      #
      #   @return [String, nil]
      required :code, String, nil?: true

      # @!attribute message
      #   Human-readable description of the error.
      #
      #   @return [String]
      required :message, String

      # @!attribute param
      #   The request parameter the error relates to; may be null.
      #
      #   @return [String, nil]
      required :param, String, nil?: true

      # @!attribute type
      #   The category of error returned by the API.
      #
      #   @return [String]
      required :type, String

      # @!method initialize(code:, message:, param:, type:)
      #   @param code [String, nil]
      #   @param message [String]
      #   @param param [String, nil]
      #   @param type [String]
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# @see OpenAI::Resources::Evals#create
-
1
class EvalCreateParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!attribute data_source_config
-
# The configuration for the data source used for the evaluation runs. Dictates the
-
# schema of the data used in the evaluation.
-
#
-
# @return [OpenAI::Models::EvalCreateParams::DataSourceConfig::Custom, OpenAI::Models::EvalCreateParams::DataSourceConfig::Logs, OpenAI::Models::EvalCreateParams::DataSourceConfig::StoredCompletions]
-
1
required :data_source_config, union: -> { OpenAI::EvalCreateParams::DataSourceConfig }
-
-
# @!attribute testing_criteria
-
# A list of graders for all eval runs in this group. Graders can reference
-
# variables in the data source using double curly braces notation, like
-
# `{{item.variable_name}}`. To reference the model's output, use the `sample`
-
# namespace (ie, `{{sample.output_text}}`).
-
#
-
# @return [Array<OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel, OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::EvalCreateParams::TestingCriterion::TextSimilarity, OpenAI::Models::EvalCreateParams::TestingCriterion::Python, OpenAI::Models::EvalCreateParams::TestingCriterion::ScoreModel>]
-
1
required :testing_criteria,
-
-> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::EvalCreateParams::TestingCriterion] }
-
-
# @!attribute metadata
-
# Set of 16 key-value pairs that can be attached to an object. This can be useful
-
# for storing additional information about the object in a structured format, and
-
# querying for objects via API or the dashboard.
-
#
-
# Keys are strings with a maximum length of 64 characters. Values are strings with
-
# a maximum length of 512 characters.
-
#
-
# @return [Hash{Symbol=>String}, nil]
-
1
optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
-
-
# @!attribute name
-
# The name of the evaluation.
-
#
-
# @return [String, nil]
-
1
optional :name, String
-
-
# @!method initialize(data_source_config:, testing_criteria:, metadata: nil, name: nil, request_options: {})
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::EvalCreateParams} for more details.
-
#
-
# @param data_source_config [OpenAI::Models::EvalCreateParams::DataSourceConfig::Custom, OpenAI::Models::EvalCreateParams::DataSourceConfig::Logs, OpenAI::Models::EvalCreateParams::DataSourceConfig::StoredCompletions] The configuration for the data source used for the evaluation runs. Dictates the
-
#
-
# @param testing_criteria [Array<OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel, OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::EvalCreateParams::TestingCriterion::TextSimilarity, OpenAI::Models::EvalCreateParams::TestingCriterion::Python, OpenAI::Models::EvalCreateParams::TestingCriterion::ScoreModel>] A list of graders for all eval runs in this group. Graders can reference variabl
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
#
-
# @param name [String] The name of the evaluation.
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
-
# The configuration for the data source used for the evaluation runs. Dictates the
-
# schema of the data used in the evaluation.
-
1
module DataSourceConfig
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
# A CustomDataSourceConfig object that defines the schema for the data source used for the evaluation runs.
-
# This schema is used to define the shape of the data that will be:
-
# - Used to define your testing criteria and
-
# - What data is required when creating a run
-
1
variant :custom, -> { OpenAI::EvalCreateParams::DataSourceConfig::Custom }
-
-
# A data source config which specifies the metadata property of your logs query.
-
# This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc.
-
1
variant :logs, -> { OpenAI::EvalCreateParams::DataSourceConfig::Logs }
-
-
# Deprecated in favor of LogsDataSourceConfig.
-
1
variant :stored_completions, -> { OpenAI::EvalCreateParams::DataSourceConfig::StoredCompletions }
-
-
1
class Custom < OpenAI::Internal::Type::BaseModel
-
# @!attribute item_schema
-
# The json schema for each row in the data source.
-
#
-
# @return [Hash{Symbol=>Object}]
-
1
required :item_schema, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]
-
-
# @!attribute type
-
# The type of data source. Always `custom`.
-
#
-
# @return [Symbol, :custom]
-
1
required :type, const: :custom
-
-
# @!attribute include_sample_schema
-
# Whether the eval should expect you to populate the sample namespace (ie, by
-
# generating responses off of your data source)
-
#
-
# @return [Boolean, nil]
-
1
optional :include_sample_schema, OpenAI::Internal::Type::Boolean
-
-
# @!method initialize(item_schema:, include_sample_schema: nil, type: :custom)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::EvalCreateParams::DataSourceConfig::Custom} for more details.
-
#
-
# A CustomDataSourceConfig object that defines the schema for the data source used
-
# for the evaluation runs. This schema is used to define the shape of the data
-
# that will be:
-
#
-
# - Used to define your testing criteria and
-
# - What data is required when creating a run
-
#
-
# @param item_schema [Hash{Symbol=>Object}] The json schema for each row in the data source.
-
#
-
# @param include_sample_schema [Boolean] Whether the eval should expect you to populate the sample namespace (ie, by gene
-
#
-
# @param type [Symbol, :custom] The type of data source. Always `custom`.
-
end
-
-
1
class Logs < OpenAI::Internal::Type::BaseModel
-
# @!attribute type
-
# The type of data source. Always `logs`.
-
#
-
# @return [Symbol, :logs]
-
1
required :type, const: :logs
-
-
# @!attribute metadata
-
# Metadata filters for the logs data source.
-
#
-
# @return [Hash{Symbol=>Object}, nil]
-
1
optional :metadata, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]
-
-
# @!method initialize(metadata: nil, type: :logs)
-
# A data source config which specifies the metadata property of your logs query.
-
# This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc.
-
#
-
# @param metadata [Hash{Symbol=>Object}] Metadata filters for the logs data source.
-
#
-
# @param type [Symbol, :logs] The type of data source. Always `logs`.
-
end
-
-
# @deprecated Use the Logs data source config instead.
class StoredCompletions < OpenAI::Internal::Type::BaseModel
  # @!attribute type
  # The type of data source. Always `stored_completions`.
  #
  # @return [Symbol, :stored_completions]
  required :type, const: :stored_completions

  # @!attribute metadata
  # Metadata filters for the stored completions data source.
  #
  # @return [Hash{Symbol=>Object}, nil]
  optional :metadata, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]

  # @!method initialize(metadata: nil, type: :stored_completions)
  # Deprecated in favor of LogsDataSourceConfig.
  #
  # @param metadata [Hash{Symbol=>Object}] Metadata filters for the stored completions data source.
  #
  # @param type [Symbol, :stored_completions] The type of data source. Always `stored_completions`.
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::EvalCreateParams::DataSourceConfig::Custom, OpenAI::Models::EvalCreateParams::DataSourceConfig::Logs, OpenAI::Models::EvalCreateParams::DataSourceConfig::StoredCompletions)]
-
end
-
-
# A LabelModelGrader object which uses a model to assign labels to each item in
-
# the evaluation.
-
1
module TestingCriterion
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
# A LabelModelGrader object which uses a model to assign labels to each item
-
# in the evaluation.
-
1
variant :label_model, -> { OpenAI::EvalCreateParams::TestingCriterion::LabelModel }
-
-
# A StringCheckGrader object that performs a string comparison between input and reference using a specified operation.
-
1
variant :string_check, -> { OpenAI::Graders::StringCheckGrader }
-
-
# A TextSimilarityGrader object which grades text based on similarity metrics.
-
1
variant :text_similarity, -> { OpenAI::EvalCreateParams::TestingCriterion::TextSimilarity }
-
-
# A PythonGrader object that runs a python script on the input.
-
1
variant :python, -> { OpenAI::EvalCreateParams::TestingCriterion::Python }
-
-
# A ScoreModelGrader object that uses a model to assign a score to the input.
-
1
variant :score_model, -> { OpenAI::EvalCreateParams::TestingCriterion::ScoreModel }
-
-
1
class LabelModel < OpenAI::Internal::Type::BaseModel
-
# @!attribute input
-
# A list of chat messages forming the prompt or context. May include variable
-
# references to the `item` namespace, ie {{item.name}}.
-
#
-
# @return [Array<OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::SimpleInputMessage, OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem>]
-
1
required :input,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[union: OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input]
-
}
-
-
# @!attribute labels
-
# The labels to classify to each item in the evaluation.
-
#
-
# @return [Array<String>]
-
1
required :labels, OpenAI::Internal::Type::ArrayOf[String]
-
-
# @!attribute model
-
# The model to use for the evaluation. Must support structured outputs.
-
#
-
# @return [String]
-
1
required :model, String
-
-
# @!attribute name
-
# The name of the grader.
-
#
-
# @return [String]
-
1
required :name, String
-
-
# @!attribute passing_labels
-
# The labels that indicate a passing result. Must be a subset of labels.
-
#
-
# @return [Array<String>]
-
1
required :passing_labels, OpenAI::Internal::Type::ArrayOf[String]
-
-
# @!attribute type
-
# The object type, which is always `label_model`.
-
#
-
# @return [Symbol, :label_model]
-
1
required :type, const: :label_model
-
-
# @!method initialize(input:, labels:, model:, name:, passing_labels:, type: :label_model)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel} for more
-
# details.
-
#
-
# A LabelModelGrader object which uses a model to assign labels to each item in
-
# the evaluation.
-
#
-
# @param input [Array<OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::SimpleInputMessage, OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem>] A list of chat messages forming the prompt or context. May include variable refe
-
#
-
# @param labels [Array<String>] The labels to classify to each item in the evaluation.
-
#
-
# @param model [String] The model to use for the evaluation. Must support structured outputs.
-
#
-
# @param name [String] The name of the grader.
-
#
-
# @param passing_labels [Array<String>] The labels that indicate a passing result. Must be a subset of labels.
-
#
-
# @param type [Symbol, :label_model] The object type, which is always `label_model`.
-
-
# A chat message that makes up the prompt or context. May include variable
-
# references to the `item` namespace, ie {{item.name}}.
-
1
module Input
-
1
extend OpenAI::Internal::Type::Union
-
-
1
variant -> { OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::SimpleInputMessage }
-
-
# A message input to the model with a role indicating instruction following
-
# hierarchy. Instructions given with the `developer` or `system` role take
-
# precedence over instructions given with the `user` role. Messages with the
-
# `assistant` role are presumed to have been generated by the model in previous
-
# interactions.
-
1
variant -> { OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem }
-
-
1
class SimpleInputMessage < OpenAI::Internal::Type::BaseModel
  # @!attribute content
  # The content of the message.
  #
  # @return [String]
  required :content, String

  # @!attribute role
  # The role of the message (e.g. "system", "assistant", "user").
  #
  # @return [String]
  required :role, String

  # @!method initialize(content:, role:)
  # A simple chat message: free-form content plus a free-form role string.
  #
  # @param content [String] The content of the message.
  #
  # @param role [String] The role of the message (e.g. "system", "assistant", "user").
end
-
-
1
class EvalItem < OpenAI::Internal::Type::BaseModel
-
# @!attribute content
-
# Inputs to the model - can contain template strings.
-
#
-
# @return [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content::OutputText, OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content::InputImage, Array<Object>]
-
1
required :content,
-
union: -> {
-
OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content
-
}
-
-
# @!attribute role
-
# The role of the message input. One of `user`, `assistant`, `system`, or
-
# `developer`.
-
#
-
# @return [Symbol, OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Role]
-
1
required :role,
-
enum: -> {
-
OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Role
-
}
-
-
# @!attribute type
-
# The type of the message input. Always `message`.
-
#
-
# @return [Symbol, OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Type, nil]
-
1
optional :type,
-
enum: -> {
-
OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Type
-
}
-
-
# @!method initialize(content:, role:, type: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem}
-
# for more details.
-
#
-
# A message input to the model with a role indicating instruction following
-
# hierarchy. Instructions given with the `developer` or `system` role take
-
# precedence over instructions given with the `user` role. Messages with the
-
# `assistant` role are presumed to have been generated by the model in previous
-
# interactions.
-
#
-
# @param content [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content::OutputText, OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content::InputImage, Array<Object>] Inputs to the model - can contain template strings.
-
#
-
# @param role [Symbol, OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Role] The role of the message input. One of `user`, `assistant`, `system`, or
-
#
-
# @param type [Symbol, OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Type] The type of the message input. Always `message`.
-
-
# Inputs to the model - can contain template strings.
-
#
-
# @see OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem#content
-
1
module Content
-
1
extend OpenAI::Internal::Type::Union
-
-
# A text input to the model.
-
1
variant String
-
-
# A text input to the model.
-
1
variant -> { OpenAI::Responses::ResponseInputText }
-
-
# A text output from the model.
-
1
variant -> {
-
OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content::OutputText
-
}
-
-
# An image input to the model.
-
1
variant -> {
-
OpenAI::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content::InputImage
-
}
-
-
# A list of inputs, each of which may be either an input text or input image object.
-
1
variant -> { OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content::AnArrayOfInputTextAndInputImageArray }
-
-
1
class OutputText < OpenAI::Internal::Type::BaseModel
  # @!attribute text
  # The text output from the model.
  #
  # @return [String]
  required :text, String

  # @!attribute type
  # The type of the output text. Always `output_text`.
  #
  # @return [Symbol, :output_text]
  required :type, const: :output_text

  # @!method initialize(text:, type: :output_text)
  # Some parameter documentation has been truncated, see
  # {OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content::OutputText}
  # for more details.
  #
  # A text output from the model.
  #
  # @param text [String] The text output from the model.
  #
  # @param type [Symbol, :output_text] The type of the output text. Always `output_text`.
end
-
-
1
class InputImage < OpenAI::Internal::Type::BaseModel
  # @!attribute image_url
  # The URL of the image input.
  #
  # @return [String]
  required :image_url, String

  # @!attribute type
  # The type of the image input. Always `input_image`.
  #
  # @return [Symbol, :input_image]
  required :type, const: :input_image

  # @!attribute detail
  # The detail level of the image to be sent to the model. One of `high`, `low`, or
  # `auto`. Defaults to `auto`.
  #
  # @return [String, nil]
  optional :detail, String

  # @!method initialize(image_url:, detail: nil, type: :input_image)
  # Some parameter documentation has been truncated, see
  # {OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content::InputImage}
  # for more details.
  #
  # An image input to the model.
  #
  # @param image_url [String] The URL of the image input.
  #
  # @param detail [String] The detail level of the image to be sent to the model. One of `high`, `low`, or
  #
  # @param type [Symbol, :input_image] The type of the image input. Always `input_image`.
end
-
-
# @!method self.variants
-
# @return [Array(String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content::OutputText, OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem::Content::InputImage, Array<Object>)]
-
-
# @type [OpenAI::Internal::Type::Converter]
-
1
AnArrayOfInputTextAndInputImageArray = OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::Unknown]
-
end
-
-
# The role of the message input. One of `user`, `assistant`, `system`, or
-
# `developer`.
-
#
-
# @see OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem#role
-
1
# The role of the message input. One of `user`, `assistant`, `system`, or
# `developer`.
module Role
  extend OpenAI::Internal::Type::Enum

  USER = :user
  ASSISTANT = :assistant
  SYSTEM = :system
  DEVELOPER = :developer

  # @!method self.values
  # @return [Array<Symbol>]
end
-
-
# The type of the message input. Always `message`.
-
#
-
# @see OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem#type
-
1
# The type of the message input. Always `message`.
module Type
  extend OpenAI::Internal::Type::Enum

  MESSAGE = :message

  # @!method self.values
  # @return [Array<Symbol>]
end
-
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::SimpleInputMessage, OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel::Input::EvalItem)]
-
end
-
end
-
-
1
class TextSimilarity < OpenAI::Models::Graders::TextSimilarityGrader
  # @!attribute pass_threshold
  # The threshold for the score.
  #
  # @return [Float]
  required :pass_threshold, Float

  # @!method initialize(pass_threshold:)
  # A TextSimilarityGrader object which grades text based on similarity metrics.
  # Extends the base grader with a required pass/fail score threshold.
  #
  # @param pass_threshold [Float] The threshold for the score.
end
-
-
1
class Python < OpenAI::Models::Graders::PythonGrader
  # @!attribute pass_threshold
  # The threshold for the score.
  #
  # @return [Float, nil]
  optional :pass_threshold, Float

  # @!method initialize(pass_threshold: nil)
  # A PythonGrader object that runs a python script on the input.
  # Extends the base grader with an optional pass/fail score threshold.
  #
  # @param pass_threshold [Float] The threshold for the score.
end
-
-
1
class ScoreModel < OpenAI::Models::Graders::ScoreModelGrader
  # @!attribute pass_threshold
  # The threshold for the score.
  #
  # @return [Float, nil]
  optional :pass_threshold, Float

  # @!method initialize(pass_threshold: nil)
  # A ScoreModelGrader object that uses a model to assign a score to the input.
  # Extends the base grader with an optional pass/fail score threshold.
  #
  # @param pass_threshold [Float] The threshold for the score.
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::EvalCreateParams::TestingCriterion::LabelModel, OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::EvalCreateParams::TestingCriterion::TextSimilarity, OpenAI::Models::EvalCreateParams::TestingCriterion::Python, OpenAI::Models::EvalCreateParams::TestingCriterion::ScoreModel)]
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# @see OpenAI::Resources::Evals#create
-
1
class EvalCreateResponse < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# Unique identifier for the evaluation.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute created_at
-
# The Unix timestamp (in seconds) for when the eval was created.
-
#
-
# @return [Integer]
-
1
required :created_at, Integer
-
-
# @!attribute data_source_config
-
# Configuration of data sources used in runs of the evaluation.
-
#
-
# @return [OpenAI::Models::EvalCustomDataSourceConfig, OpenAI::Models::EvalCreateResponse::DataSourceConfig::Logs, OpenAI::Models::EvalStoredCompletionsDataSourceConfig]
-
1
required :data_source_config, union: -> { OpenAI::Models::EvalCreateResponse::DataSourceConfig }
-
-
# @!attribute metadata
-
# Set of 16 key-value pairs that can be attached to an object. This can be useful
-
# for storing additional information about the object in a structured format, and
-
# querying for objects via API or the dashboard.
-
#
-
# Keys are strings with a maximum length of 64 characters. Values are strings with
-
# a maximum length of 512 characters.
-
#
-
# @return [Hash{Symbol=>String}, nil]
-
1
required :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
-
-
# @!attribute name
-
# The name of the evaluation.
-
#
-
# @return [String]
-
1
required :name, String
-
-
# @!attribute object
-
# The object type.
-
#
-
# @return [Symbol, :eval]
-
1
required :object, const: :eval
-
-
# @!attribute testing_criteria
-
# A list of testing criteria.
-
#
-
# @return [Array<OpenAI::Models::Graders::LabelModelGrader, OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderTextSimilarity, OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderPython, OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderScoreModel>]
-
1
required :testing_criteria,
-
-> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Models::EvalCreateResponse::TestingCriterion] }
-
-
# @!method initialize(id:, created_at:, data_source_config:, metadata:, name:, testing_criteria:, object: :eval)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::EvalCreateResponse} for more details.
-
#
-
# An Eval object with a data source config and testing criteria. An Eval
-
# represents a task to be done for your LLM integration. Like:
-
#
-
# - Improve the quality of my chatbot
-
# - See how well my chatbot handles customer support
-
# - Check if o4-mini is better at my usecase than gpt-4o
-
#
-
# @param id [String] Unique identifier for the evaluation.
-
#
-
# @param created_at [Integer] The Unix timestamp (in seconds) for when the eval was created.
-
#
-
# @param data_source_config [OpenAI::Models::EvalCustomDataSourceConfig, OpenAI::Models::EvalCreateResponse::DataSourceConfig::Logs, OpenAI::Models::EvalStoredCompletionsDataSourceConfig] Configuration of data sources used in runs of the evaluation.
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
#
-
# @param name [String] The name of the evaluation.
-
#
-
# @param testing_criteria [Array<OpenAI::Models::Graders::LabelModelGrader, OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderTextSimilarity, OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderPython, OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderScoreModel>] A list of testing criteria.
-
#
-
# @param object [Symbol, :eval] The object type.
-
-
# Configuration of data sources used in runs of the evaluation.
-
#
-
# @see OpenAI::Models::EvalCreateResponse#data_source_config
-
1
module DataSourceConfig
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
# A CustomDataSourceConfig which specifies the schema of your `item` and optionally `sample` namespaces.
-
# The response schema defines the shape of the data that will be:
-
# - Used to define your testing criteria and
-
# - What data is required when creating a run
-
1
variant :custom, -> { OpenAI::EvalCustomDataSourceConfig }
-
-
# A LogsDataSourceConfig which specifies the metadata property of your logs query.
-
# This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc.
-
# The schema returned by this data source config is used to define what variables are available in your evals.
-
# `item` and `sample` are both defined when using this data source config.
-
1
variant :logs, -> { OpenAI::Models::EvalCreateResponse::DataSourceConfig::Logs }
-
-
# Deprecated in favor of LogsDataSourceConfig.
-
1
variant :stored_completions, -> { OpenAI::EvalStoredCompletionsDataSourceConfig }
-
-
1
class Logs < OpenAI::Internal::Type::BaseModel
  # @!attribute schema
  # The json schema for the run data source items. Learn how to build JSON schemas
  # [here](https://json-schema.org/).
  #
  # @return [Hash{Symbol=>Object}]
  required :schema, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]

  # @!attribute type
  # The type of data source. Always `logs`.
  #
  # @return [Symbol, :logs]
  required :type, const: :logs

  # @!attribute metadata
  # Set of 16 key-value pairs that can be attached to an object. This can be useful
  # for storing additional information about the object in a structured format, and
  # querying for objects via API or the dashboard.
  #
  # Keys are strings with a maximum length of 64 characters. Values are strings with
  # a maximum length of 512 characters.
  #
  # @return [Hash{Symbol=>String}, nil]
  optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true

  # @!method initialize(schema:, metadata: nil, type: :logs)
  # Some parameter documentation has been truncated, see
  # {OpenAI::Models::EvalCreateResponse::DataSourceConfig::Logs} for more details.
  #
  # A LogsDataSourceConfig which specifies the metadata property of your logs query.
  # This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. The
  # schema returned by this data source config is used to define what variables are
  # available in your evals. `item` and `sample` are both defined when using this
  # data source config.
  #
  # @param schema [Hash{Symbol=>Object}] The json schema for the run data source items.
  #
  # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
  #
  # @param type [Symbol, :logs] The type of data source. Always `logs`.
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::EvalCustomDataSourceConfig, OpenAI::Models::EvalCreateResponse::DataSourceConfig::Logs, OpenAI::Models::EvalStoredCompletionsDataSourceConfig)]
-
end
-
-
# A LabelModelGrader object which uses a model to assign labels to each item in
-
# the evaluation.
-
1
module TestingCriterion
-
1
extend OpenAI::Internal::Type::Union
-
-
# A LabelModelGrader object which uses a model to assign labels to each item
-
# in the evaluation.
-
1
variant -> { OpenAI::Graders::LabelModelGrader }
-
-
# A StringCheckGrader object that performs a string comparison between input and reference using a specified operation.
-
1
variant -> { OpenAI::Graders::StringCheckGrader }
-
-
# A TextSimilarityGrader object which grades text based on similarity metrics.
-
1
variant -> { OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderTextSimilarity }
-
-
# A PythonGrader object that runs a python script on the input.
-
1
variant -> { OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderPython }
-
-
# A ScoreModelGrader object that uses a model to assign a score to the input.
-
1
variant -> { OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderScoreModel }
-
-
1
class EvalGraderTextSimilarity < OpenAI::Models::Graders::TextSimilarityGrader
-
# @!attribute pass_threshold
-
# The threshold for the score.
-
#
-
# @return [Float]
-
1
required :pass_threshold, Float
-
-
# @!method initialize(pass_threshold:)
-
# A TextSimilarityGrader object which grades text based on similarity metrics.
-
#
-
# @param pass_threshold [Float] The threshold for the score.
-
end
-
-
1
class EvalGraderPython < OpenAI::Models::Graders::PythonGrader
-
# @!attribute pass_threshold
-
# The threshold for the score.
-
#
-
# @return [Float, nil]
-
1
optional :pass_threshold, Float
-
-
# @!method initialize(pass_threshold: nil)
-
# A PythonGrader object that runs a python script on the input.
-
#
-
# @param pass_threshold [Float] The threshold for the score.
-
end
-
-
1
class EvalGraderScoreModel < OpenAI::Models::Graders::ScoreModelGrader
-
# @!attribute pass_threshold
-
# The threshold for the score.
-
#
-
# @return [Float, nil]
-
1
optional :pass_threshold, Float
-
-
# @!method initialize(pass_threshold: nil)
-
# A ScoreModelGrader object that uses a model to assign a score to the input.
-
#
-
# @param pass_threshold [Float] The threshold for the score.
-
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Graders::LabelModelGrader, OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderTextSimilarity, OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderPython, OpenAI::Models::EvalCreateResponse::TestingCriterion::EvalGraderScoreModel)]
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
class EvalCustomDataSourceConfig < OpenAI::Internal::Type::BaseModel
  # @!attribute schema
  # The json schema for the run data source items. Learn how to build JSON schemas
  # [here](https://json-schema.org/).
  #
  # @return [Hash{Symbol=>Object}]
  required :schema, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]

  # @!attribute type
  # The type of data source. Always `custom`.
  #
  # @return [Symbol, :custom]
  required :type, const: :custom

  # @!method initialize(schema:, type: :custom)
  # Some parameter documentation has been truncated, see
  # {OpenAI::Models::EvalCustomDataSourceConfig} for more details.
  #
  # A CustomDataSourceConfig which specifies the schema of your `item` and
  # optionally `sample` namespaces. The response schema defines the shape of the
  # data that will be:
  #
  # - Used to define your testing criteria and
  # - What data is required when creating a run
  #
  # @param schema [Hash{Symbol=>Object}] The json schema for the run data source items.
  #
  # @param type [Symbol, :custom] The type of data source. Always `custom`.
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# @see OpenAI::Resources::Evals#delete
-
1
# Request parameters for deleting an eval; carries no body fields of its own,
# only the shared request options.
class EvalDeleteParams < OpenAI::Internal::Type::BaseModel
  extend OpenAI::Internal::Type::RequestParameters::Converter
  include OpenAI::Internal::Type::RequestParameters

  # @!method initialize(request_options: {})
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# @see OpenAI::Resources::Evals#delete
-
1
class EvalDeleteResponse < OpenAI::Internal::Type::BaseModel
  # @!attribute deleted
  # Whether the eval was deleted.
  #
  # @return [Boolean]
  required :deleted, OpenAI::Internal::Type::Boolean

  # @!attribute eval_id
  # The identifier of the deleted eval.
  #
  # @return [String]
  required :eval_id, String

  # @!attribute object
  # The object type string returned by the API.
  #
  # @return [String]
  required :object, String

  # @!method initialize(deleted:, eval_id:, object:)
  # Response payload returned when an eval is deleted.
  #
  # @param deleted [Boolean]
  # @param eval_id [String]
  # @param object [String]
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# @see OpenAI::Resources::Evals#list
-
1
class EvalListParams < OpenAI::Internal::Type::BaseModel
  extend OpenAI::Internal::Type::RequestParameters::Converter
  include OpenAI::Internal::Type::RequestParameters

  # @!attribute after
  # Identifier for the last eval from the previous pagination request.
  #
  # @return [String, nil]
  optional :after, String

  # @!attribute limit
  # Number of evals to retrieve.
  #
  # @return [Integer, nil]
  optional :limit, Integer

  # @!attribute order
  # Sort order for evals by timestamp. Use `asc` for ascending order or `desc` for
  # descending order.
  #
  # @return [Symbol, OpenAI::Models::EvalListParams::Order, nil]
  optional :order, enum: -> { OpenAI::EvalListParams::Order }

  # @!attribute order_by
  # Evals can be ordered by creation time or last updated time. Use `created_at` for
  # creation time or `updated_at` for last updated time.
  #
  # @return [Symbol, OpenAI::Models::EvalListParams::OrderBy, nil]
  optional :order_by, enum: -> { OpenAI::EvalListParams::OrderBy }

  # @!method initialize(after: nil, limit: nil, order: nil, order_by: nil, request_options: {})
  # Some parameter documentation has been truncated, see
  # {OpenAI::Models::EvalListParams} for more details.
  #
  # @param after [String] Identifier for the last eval from the previous pagination request.
  #
  # @param limit [Integer] Number of evals to retrieve.
  #
  # @param order [Symbol, OpenAI::Models::EvalListParams::Order] Sort order for evals by timestamp. Use `asc` for ascending order or `desc` for d
  #
  # @param order_by [Symbol, OpenAI::Models::EvalListParams::OrderBy] Evals can be ordered by creation time or last updated time. Use
  #
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]

  # Sort order for evals by timestamp. Use `asc` for ascending order or `desc` for
  # descending order.
  module Order
    extend OpenAI::Internal::Type::Enum

    ASC = :asc
    DESC = :desc

    # @!method self.values
    # @return [Array<Symbol>]
  end

  # Evals can be ordered by creation time or last updated time. Use `created_at` for
  # creation time or `updated_at` for last updated time.
  module OrderBy
    extend OpenAI::Internal::Type::Enum

    CREATED_AT = :created_at
    UPDATED_AT = :updated_at

    # @!method self.values
    # @return [Array<Symbol>]
  end
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# @see OpenAI::Resources::Evals#list
-
1
class EvalListResponse < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# Unique identifier for the evaluation.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute created_at
-
# The Unix timestamp (in seconds) for when the eval was created.
-
#
-
# @return [Integer]
-
1
required :created_at, Integer
-
-
# @!attribute data_source_config
-
# Configuration of data sources used in runs of the evaluation.
-
#
-
# @return [OpenAI::Models::EvalCustomDataSourceConfig, OpenAI::Models::EvalListResponse::DataSourceConfig::Logs, OpenAI::Models::EvalStoredCompletionsDataSourceConfig]
-
1
required :data_source_config, union: -> { OpenAI::Models::EvalListResponse::DataSourceConfig }
-
-
# @!attribute metadata
-
# Set of 16 key-value pairs that can be attached to an object. This can be useful
-
# for storing additional information about the object in a structured format, and
-
# querying for objects via API or the dashboard.
-
#
-
# Keys are strings with a maximum length of 64 characters. Values are strings with
-
# a maximum length of 512 characters.
-
#
-
# @return [Hash{Symbol=>String}, nil]
-
1
required :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
-
-
# @!attribute name
-
# The name of the evaluation.
-
#
-
# @return [String]
-
1
required :name, String
-
-
# @!attribute object
-
# The object type.
-
#
-
# @return [Symbol, :eval]
-
1
required :object, const: :eval
-
-
# @!attribute testing_criteria
-
# A list of testing criteria.
-
#
-
# @return [Array<OpenAI::Models::Graders::LabelModelGrader, OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderTextSimilarity, OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderPython, OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderScoreModel>]
-
1
required :testing_criteria,
-
-> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Models::EvalListResponse::TestingCriterion] }
-
-
# @!method initialize(id:, created_at:, data_source_config:, metadata:, name:, testing_criteria:, object: :eval)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::EvalListResponse} for more details.
-
#
-
# An Eval object with a data source config and testing criteria. An Eval
-
# represents a task to be done for your LLM integration. Like:
-
#
-
# - Improve the quality of my chatbot
-
# - See how well my chatbot handles customer support
-
# - Check if o4-mini is better at my usecase than gpt-4o
-
#
-
# @param id [String] Unique identifier for the evaluation.
-
#
-
# @param created_at [Integer] The Unix timestamp (in seconds) for when the eval was created.
-
#
-
# @param data_source_config [OpenAI::Models::EvalCustomDataSourceConfig, OpenAI::Models::EvalListResponse::DataSourceConfig::Logs, OpenAI::Models::EvalStoredCompletionsDataSourceConfig] Configuration of data sources used in runs of the evaluation.
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
#
-
# @param name [String] The name of the evaluation.
-
#
-
# @param testing_criteria [Array<OpenAI::Models::Graders::LabelModelGrader, OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderTextSimilarity, OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderPython, OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderScoreModel>] A list of testing criteria.
-
#
-
# @param object [Symbol, :eval] The object type.
-
-
# Configuration of data sources used in runs of the evaluation.
#
# Union type discriminated by the `type` field of the payload.
#
# @see OpenAI::Models::EvalListResponse#data_source_config
module DataSourceConfig
  extend OpenAI::Internal::Type::Union

  discriminator :type

  # A CustomDataSourceConfig which specifies the schema of your `item` and optionally `sample` namespaces.
  # The response schema defines the shape of the data that will be:
  # - Used to define your testing criteria and
  # - What data is required when creating a run
  variant :custom, -> { OpenAI::EvalCustomDataSourceConfig }

  # A LogsDataSourceConfig which specifies the metadata property of your logs query.
  # This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc.
  # The schema returned by this data source config is used to define what variables are available in your evals.
  # `item` and `sample` are both defined when using this data source config.
  variant :logs, -> { OpenAI::Models::EvalListResponse::DataSourceConfig::Logs }

  # Deprecated in favor of LogsDataSourceConfig.
  variant :stored_completions, -> { OpenAI::EvalStoredCompletionsDataSourceConfig }

  class Logs < OpenAI::Internal::Type::BaseModel
    # @!attribute schema
    #   The json schema for the run data source items. Learn how to build JSON schemas
    #   [here](https://json-schema.org/).
    #
    #   @return [Hash{Symbol=>Object}]
    required :schema, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]

    # @!attribute type
    #   The type of data source. Always `logs`.
    #
    #   @return [Symbol, :logs]
    required :type, const: :logs

    # @!attribute metadata
    #   Set of 16 key-value pairs that can be attached to an object. This can be useful
    #   for storing additional information about the object in a structured format, and
    #   querying for objects via API or the dashboard.
    #
    #   Keys are strings with a maximum length of 64 characters. Values are strings with
    #   a maximum length of 512 characters.
    #
    #   @return [Hash{Symbol=>String}, nil]
    optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true

    # @!method initialize(schema:, metadata: nil, type: :logs)
    #   Some parameter documentation has been truncated; see
    #   {OpenAI::Models::EvalListResponse::DataSourceConfig::Logs} for more details.
    #
    #   A LogsDataSourceConfig which specifies the metadata property of your logs query.
    #   This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. The
    #   schema returned by this data source config is used to define what variables are
    #   available in your evals. `item` and `sample` are both defined when using this
    #   data source config.
    #
    #   @param schema [Hash{Symbol=>Object}] The json schema for the run data source items.
    #
    #   @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
    #
    #   @param type [Symbol, :logs] The type of data source. Always `logs`.
  end

  # @!method self.variants
  #   @return [Array(OpenAI::Models::EvalCustomDataSourceConfig, OpenAI::Models::EvalListResponse::DataSourceConfig::Logs, OpenAI::Models::EvalStoredCompletionsDataSourceConfig)]
end
-
-
# A testing criterion attached to the eval. Untagged union over the grader
# kinds; variants are matched structurally (no discriminator declared).
module TestingCriterion
  extend OpenAI::Internal::Type::Union

  # A LabelModelGrader object which uses a model to assign labels to each item
  # in the evaluation.
  variant -> { OpenAI::Graders::LabelModelGrader }

  # A StringCheckGrader object that performs a string comparison between input and reference using a specified operation.
  variant -> { OpenAI::Graders::StringCheckGrader }

  # A TextSimilarityGrader object which grades text based on similarity metrics.
  variant -> { OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderTextSimilarity }

  # A PythonGrader object that runs a python script on the input.
  variant -> { OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderPython }

  # A ScoreModelGrader object that uses a model to assign a score to the input.
  variant -> { OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderScoreModel }

  # TextSimilarityGrader extended with a required pass threshold.
  class EvalGraderTextSimilarity < OpenAI::Models::Graders::TextSimilarityGrader
    # @!attribute pass_threshold
    #   The threshold for the score.
    #
    #   @return [Float]
    required :pass_threshold, Float

    # @!method initialize(pass_threshold:)
    #   A TextSimilarityGrader object which grades text based on similarity metrics.
    #
    #   @param pass_threshold [Float] The threshold for the score.
  end

  # PythonGrader extended with an optional pass threshold.
  class EvalGraderPython < OpenAI::Models::Graders::PythonGrader
    # @!attribute pass_threshold
    #   The threshold for the score.
    #
    #   @return [Float, nil]
    optional :pass_threshold, Float

    # @!method initialize(pass_threshold: nil)
    #   A PythonGrader object that runs a python script on the input.
    #
    #   @param pass_threshold [Float] The threshold for the score.
  end

  # ScoreModelGrader extended with an optional pass threshold.
  class EvalGraderScoreModel < OpenAI::Models::Graders::ScoreModelGrader
    # @!attribute pass_threshold
    #   The threshold for the score.
    #
    #   @return [Float, nil]
    optional :pass_threshold, Float

    # @!method initialize(pass_threshold: nil)
    #   A ScoreModelGrader object that uses a model to assign a score to the input.
    #
    #   @param pass_threshold [Float] The threshold for the score.
  end

  # @!method self.variants
  #   @return [Array(OpenAI::Models::Graders::LabelModelGrader, OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderTextSimilarity, OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderPython, OpenAI::Models::EvalListResponse::TestingCriterion::EvalGraderScoreModel)]
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# Request parameters for retrieving an eval; carries no parameters beyond
# the shared request options.
#
# @see OpenAI::Resources::Evals#retrieve
class EvalRetrieveParams < OpenAI::Internal::Type::BaseModel
  extend OpenAI::Internal::Type::RequestParameters::Converter
  include OpenAI::Internal::Type::RequestParameters

  # @!method initialize(request_options: {})
  #   @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# Response model for retrieving a single eval.
#
# @see OpenAI::Resources::Evals#retrieve
class EvalRetrieveResponse < OpenAI::Internal::Type::BaseModel
  # @!attribute id
  #   Unique identifier for the evaluation.
  #
  #   @return [String]
  required :id, String

  # @!attribute created_at
  #   The Unix timestamp (in seconds) for when the eval was created.
  #
  #   @return [Integer]
  required :created_at, Integer

  # @!attribute data_source_config
  #   Configuration of data sources used in runs of the evaluation.
  #
  #   @return [OpenAI::Models::EvalCustomDataSourceConfig, OpenAI::Models::EvalRetrieveResponse::DataSourceConfig::Logs, OpenAI::Models::EvalStoredCompletionsDataSourceConfig]
  required :data_source_config, union: -> { OpenAI::Models::EvalRetrieveResponse::DataSourceConfig }

  # @!attribute metadata
  #   Set of 16 key-value pairs that can be attached to an object. This can be useful
  #   for storing additional information about the object in a structured format, and
  #   querying for objects via API or the dashboard.
  #
  #   Keys are strings with a maximum length of 64 characters. Values are strings with
  #   a maximum length of 512 characters.
  #
  #   @return [Hash{Symbol=>String}, nil]
  required :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true

  # @!attribute name
  #   The name of the evaluation.
  #
  #   @return [String]
  required :name, String

  # @!attribute object
  #   The object type.
  #
  #   @return [Symbol, :eval]
  required :object, const: :eval

  # @!attribute testing_criteria
  #   A list of testing criteria.
  #
  #   @return [Array<OpenAI::Models::Graders::LabelModelGrader, OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderTextSimilarity, OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderPython, OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderScoreModel>]
  required :testing_criteria,
           -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Models::EvalRetrieveResponse::TestingCriterion] }

  # @!method initialize(id:, created_at:, data_source_config:, metadata:, name:, testing_criteria:, object: :eval)
  #   Some parameter documentation has been truncated; see
  #   {OpenAI::Models::EvalRetrieveResponse} for more details.
  #
  #   An Eval object with a data source config and testing criteria. An Eval
  #   represents a task to be done for your LLM integration. Like:
  #
  #   - Improve the quality of my chatbot
  #   - See how well my chatbot handles customer support
  #   - Check if o4-mini is better at my usecase than gpt-4o
  #
  #   @param id [String] Unique identifier for the evaluation.
  #
  #   @param created_at [Integer] The Unix timestamp (in seconds) for when the eval was created.
  #
  #   @param data_source_config [OpenAI::Models::EvalCustomDataSourceConfig, OpenAI::Models::EvalRetrieveResponse::DataSourceConfig::Logs, OpenAI::Models::EvalStoredCompletionsDataSourceConfig] Configuration of data sources used in runs of the evaluation.
  #
  #   @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
  #
  #   @param name [String] The name of the evaluation.
  #
  #   @param testing_criteria [Array<OpenAI::Models::Graders::LabelModelGrader, OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderTextSimilarity, OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderPython, OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderScoreModel>] A list of testing criteria.
  #
  #   @param object [Symbol, :eval] The object type.

  # Configuration of data sources used in runs of the evaluation.
  #
  # Union type discriminated by the `type` field of the payload.
  #
  # @see OpenAI::Models::EvalRetrieveResponse#data_source_config
  module DataSourceConfig
    extend OpenAI::Internal::Type::Union

    discriminator :type

    # A CustomDataSourceConfig which specifies the schema of your `item` and optionally `sample` namespaces.
    # The response schema defines the shape of the data that will be:
    # - Used to define your testing criteria and
    # - What data is required when creating a run
    variant :custom, -> { OpenAI::EvalCustomDataSourceConfig }

    # A LogsDataSourceConfig which specifies the metadata property of your logs query.
    # This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc.
    # The schema returned by this data source config is used to define what variables are available in your evals.
    # `item` and `sample` are both defined when using this data source config.
    variant :logs, -> { OpenAI::Models::EvalRetrieveResponse::DataSourceConfig::Logs }

    # Deprecated in favor of LogsDataSourceConfig.
    variant :stored_completions, -> { OpenAI::EvalStoredCompletionsDataSourceConfig }

    class Logs < OpenAI::Internal::Type::BaseModel
      # @!attribute schema
      #   The json schema for the run data source items. Learn how to build JSON schemas
      #   [here](https://json-schema.org/).
      #
      #   @return [Hash{Symbol=>Object}]
      required :schema, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]

      # @!attribute type
      #   The type of data source. Always `logs`.
      #
      #   @return [Symbol, :logs]
      required :type, const: :logs

      # @!attribute metadata
      #   Set of 16 key-value pairs that can be attached to an object. This can be useful
      #   for storing additional information about the object in a structured format, and
      #   querying for objects via API or the dashboard.
      #
      #   Keys are strings with a maximum length of 64 characters. Values are strings with
      #   a maximum length of 512 characters.
      #
      #   @return [Hash{Symbol=>String}, nil]
      optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true

      # @!method initialize(schema:, metadata: nil, type: :logs)
      #   Some parameter documentation has been truncated; see
      #   {OpenAI::Models::EvalRetrieveResponse::DataSourceConfig::Logs} for more details.
      #
      #   A LogsDataSourceConfig which specifies the metadata property of your logs query.
      #   This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. The
      #   schema returned by this data source config is used to define what variables are
      #   available in your evals. `item` and `sample` are both defined when using this
      #   data source config.
      #
      #   @param schema [Hash{Symbol=>Object}] The json schema for the run data source items.
      #
      #   @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
      #
      #   @param type [Symbol, :logs] The type of data source. Always `logs`.
    end

    # @!method self.variants
    #   @return [Array(OpenAI::Models::EvalCustomDataSourceConfig, OpenAI::Models::EvalRetrieveResponse::DataSourceConfig::Logs, OpenAI::Models::EvalStoredCompletionsDataSourceConfig)]
  end

  # A testing criterion attached to the eval. Untagged union over the grader
  # kinds; variants are matched structurally (no discriminator declared).
  module TestingCriterion
    extend OpenAI::Internal::Type::Union

    # A LabelModelGrader object which uses a model to assign labels to each item
    # in the evaluation.
    variant -> { OpenAI::Graders::LabelModelGrader }

    # A StringCheckGrader object that performs a string comparison between input and reference using a specified operation.
    variant -> { OpenAI::Graders::StringCheckGrader }

    # A TextSimilarityGrader object which grades text based on similarity metrics.
    variant -> { OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderTextSimilarity }

    # A PythonGrader object that runs a python script on the input.
    variant -> { OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderPython }

    # A ScoreModelGrader object that uses a model to assign a score to the input.
    variant -> { OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderScoreModel }

    # TextSimilarityGrader extended with a required pass threshold.
    class EvalGraderTextSimilarity < OpenAI::Models::Graders::TextSimilarityGrader
      # @!attribute pass_threshold
      #   The threshold for the score.
      #
      #   @return [Float]
      required :pass_threshold, Float

      # @!method initialize(pass_threshold:)
      #   A TextSimilarityGrader object which grades text based on similarity metrics.
      #
      #   @param pass_threshold [Float] The threshold for the score.
    end

    # PythonGrader extended with an optional pass threshold.
    class EvalGraderPython < OpenAI::Models::Graders::PythonGrader
      # @!attribute pass_threshold
      #   The threshold for the score.
      #
      #   @return [Float, nil]
      optional :pass_threshold, Float

      # @!method initialize(pass_threshold: nil)
      #   A PythonGrader object that runs a python script on the input.
      #
      #   @param pass_threshold [Float] The threshold for the score.
    end

    # ScoreModelGrader extended with an optional pass threshold.
    class EvalGraderScoreModel < OpenAI::Models::Graders::ScoreModelGrader
      # @!attribute pass_threshold
      #   The threshold for the score.
      #
      #   @return [Float, nil]
      optional :pass_threshold, Float

      # @!method initialize(pass_threshold: nil)
      #   A ScoreModelGrader object that uses a model to assign a score to the input.
      #
      #   @param pass_threshold [Float] The threshold for the score.
    end

    # @!method self.variants
    #   @return [Array(OpenAI::Models::Graders::LabelModelGrader, OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderTextSimilarity, OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderPython, OpenAI::Models::EvalRetrieveResponse::TestingCriterion::EvalGraderScoreModel)]
  end
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# @deprecated
class EvalStoredCompletionsDataSourceConfig < OpenAI::Internal::Type::BaseModel
  # @!attribute schema
  #   The json schema for the run data source items. Learn how to build JSON schemas
  #   [here](https://json-schema.org/).
  #
  #   @return [Hash{Symbol=>Object}]
  required :schema, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]

  # @!attribute type
  #   The type of data source. Always `stored_completions`.
  #
  #   @return [Symbol, :stored_completions]
  required :type, const: :stored_completions

  # @!attribute metadata
  #   Set of 16 key-value pairs that can be attached to an object. This can be useful
  #   for storing additional information about the object in a structured format, and
  #   querying for objects via API or the dashboard.
  #
  #   Keys are strings with a maximum length of 64 characters. Values are strings with
  #   a maximum length of 512 characters.
  #
  #   @return [Hash{Symbol=>String}, nil]
  optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true

  # @!method initialize(schema:, metadata: nil, type: :stored_completions)
  #   Some parameter documentation has been truncated; see
  #   {OpenAI::Models::EvalStoredCompletionsDataSourceConfig} for more details.
  #
  #   Deprecated in favor of LogsDataSourceConfig.
  #
  #   @param schema [Hash{Symbol=>Object}] The json schema for the run data source items.
  #
  #   @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
  #
  #   @param type [Symbol, :stored_completions] The type of data source. Always `stored_completions`.
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# Request parameters for updating an eval's metadata and/or name.
#
# @see OpenAI::Resources::Evals#update
class EvalUpdateParams < OpenAI::Internal::Type::BaseModel
  extend OpenAI::Internal::Type::RequestParameters::Converter
  include OpenAI::Internal::Type::RequestParameters

  # @!attribute metadata
  #   Set of 16 key-value pairs that can be attached to an object. This can be useful
  #   for storing additional information about the object in a structured format, and
  #   querying for objects via API or the dashboard.
  #
  #   Keys are strings with a maximum length of 64 characters. Values are strings with
  #   a maximum length of 512 characters.
  #
  #   @return [Hash{Symbol=>String}, nil]
  optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true

  # @!attribute name
  #   Rename the evaluation.
  #
  #   @return [String, nil]
  optional :name, String

  # @!method initialize(metadata: nil, name: nil, request_options: {})
  #   Some parameter documentation has been truncated; see
  #   {OpenAI::Models::EvalUpdateParams} for more details.
  #
  #   @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
  #
  #   @param name [String] Rename the evaluation.
  #
  #   @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# Response model returned after updating an eval.
#
# @see OpenAI::Resources::Evals#update
class EvalUpdateResponse < OpenAI::Internal::Type::BaseModel
  # @!attribute id
  #   Unique identifier for the evaluation.
  #
  #   @return [String]
  required :id, String

  # @!attribute created_at
  #   The Unix timestamp (in seconds) for when the eval was created.
  #
  #   @return [Integer]
  required :created_at, Integer

  # @!attribute data_source_config
  #   Configuration of data sources used in runs of the evaluation.
  #
  #   @return [OpenAI::Models::EvalCustomDataSourceConfig, OpenAI::Models::EvalUpdateResponse::DataSourceConfig::Logs, OpenAI::Models::EvalStoredCompletionsDataSourceConfig]
  required :data_source_config, union: -> { OpenAI::Models::EvalUpdateResponse::DataSourceConfig }

  # @!attribute metadata
  #   Set of 16 key-value pairs that can be attached to an object. This can be useful
  #   for storing additional information about the object in a structured format, and
  #   querying for objects via API or the dashboard.
  #
  #   Keys are strings with a maximum length of 64 characters. Values are strings with
  #   a maximum length of 512 characters.
  #
  #   @return [Hash{Symbol=>String}, nil]
  required :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true

  # @!attribute name
  #   The name of the evaluation.
  #
  #   @return [String]
  required :name, String

  # @!attribute object
  #   The object type.
  #
  #   @return [Symbol, :eval]
  required :object, const: :eval

  # @!attribute testing_criteria
  #   A list of testing criteria.
  #
  #   @return [Array<OpenAI::Models::Graders::LabelModelGrader, OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderTextSimilarity, OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderPython, OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderScoreModel>]
  required :testing_criteria,
           -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Models::EvalUpdateResponse::TestingCriterion] }

  # @!method initialize(id:, created_at:, data_source_config:, metadata:, name:, testing_criteria:, object: :eval)
  #   Some parameter documentation has been truncated; see
  #   {OpenAI::Models::EvalUpdateResponse} for more details.
  #
  #   An Eval object with a data source config and testing criteria. An Eval
  #   represents a task to be done for your LLM integration. Like:
  #
  #   - Improve the quality of my chatbot
  #   - See how well my chatbot handles customer support
  #   - Check if o4-mini is better at my usecase than gpt-4o
  #
  #   @param id [String] Unique identifier for the evaluation.
  #
  #   @param created_at [Integer] The Unix timestamp (in seconds) for when the eval was created.
  #
  #   @param data_source_config [OpenAI::Models::EvalCustomDataSourceConfig, OpenAI::Models::EvalUpdateResponse::DataSourceConfig::Logs, OpenAI::Models::EvalStoredCompletionsDataSourceConfig] Configuration of data sources used in runs of the evaluation.
  #
  #   @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
  #
  #   @param name [String] The name of the evaluation.
  #
  #   @param testing_criteria [Array<OpenAI::Models::Graders::LabelModelGrader, OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderTextSimilarity, OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderPython, OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderScoreModel>] A list of testing criteria.
  #
  #   @param object [Symbol, :eval] The object type.

  # Configuration of data sources used in runs of the evaluation.
  #
  # Union type discriminated by the `type` field of the payload.
  #
  # @see OpenAI::Models::EvalUpdateResponse#data_source_config
  module DataSourceConfig
    extend OpenAI::Internal::Type::Union

    discriminator :type

    # A CustomDataSourceConfig which specifies the schema of your `item` and optionally `sample` namespaces.
    # The response schema defines the shape of the data that will be:
    # - Used to define your testing criteria and
    # - What data is required when creating a run
    variant :custom, -> { OpenAI::EvalCustomDataSourceConfig }

    # A LogsDataSourceConfig which specifies the metadata property of your logs query.
    # This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc.
    # The schema returned by this data source config is used to define what variables are available in your evals.
    # `item` and `sample` are both defined when using this data source config.
    variant :logs, -> { OpenAI::Models::EvalUpdateResponse::DataSourceConfig::Logs }

    # Deprecated in favor of LogsDataSourceConfig.
    variant :stored_completions, -> { OpenAI::EvalStoredCompletionsDataSourceConfig }

    class Logs < OpenAI::Internal::Type::BaseModel
      # @!attribute schema
      #   The json schema for the run data source items. Learn how to build JSON schemas
      #   [here](https://json-schema.org/).
      #
      #   @return [Hash{Symbol=>Object}]
      required :schema, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]

      # @!attribute type
      #   The type of data source. Always `logs`.
      #
      #   @return [Symbol, :logs]
      required :type, const: :logs

      # @!attribute metadata
      #   Set of 16 key-value pairs that can be attached to an object. This can be useful
      #   for storing additional information about the object in a structured format, and
      #   querying for objects via API or the dashboard.
      #
      #   Keys are strings with a maximum length of 64 characters. Values are strings with
      #   a maximum length of 512 characters.
      #
      #   @return [Hash{Symbol=>String}, nil]
      optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true

      # @!method initialize(schema:, metadata: nil, type: :logs)
      #   Some parameter documentation has been truncated; see
      #   {OpenAI::Models::EvalUpdateResponse::DataSourceConfig::Logs} for more details.
      #
      #   A LogsDataSourceConfig which specifies the metadata property of your logs query.
      #   This is usually metadata like `usecase=chatbot` or `prompt-version=v2`, etc. The
      #   schema returned by this data source config is used to define what variables are
      #   available in your evals. `item` and `sample` are both defined when using this
      #   data source config.
      #
      #   @param schema [Hash{Symbol=>Object}] The json schema for the run data source items.
      #
      #   @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
      #
      #   @param type [Symbol, :logs] The type of data source. Always `logs`.
    end

    # @!method self.variants
    #   @return [Array(OpenAI::Models::EvalCustomDataSourceConfig, OpenAI::Models::EvalUpdateResponse::DataSourceConfig::Logs, OpenAI::Models::EvalStoredCompletionsDataSourceConfig)]
  end

  # A testing criterion attached to the eval. Untagged union over the grader
  # kinds; variants are matched structurally (no discriminator declared).
  module TestingCriterion
    extend OpenAI::Internal::Type::Union

    # A LabelModelGrader object which uses a model to assign labels to each item
    # in the evaluation.
    variant -> { OpenAI::Graders::LabelModelGrader }

    # A StringCheckGrader object that performs a string comparison between input and reference using a specified operation.
    variant -> { OpenAI::Graders::StringCheckGrader }

    # A TextSimilarityGrader object which grades text based on similarity metrics.
    variant -> { OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderTextSimilarity }

    # A PythonGrader object that runs a python script on the input.
    variant -> { OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderPython }

    # A ScoreModelGrader object that uses a model to assign a score to the input.
    variant -> { OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderScoreModel }

    # TextSimilarityGrader extended with a required pass threshold.
    class EvalGraderTextSimilarity < OpenAI::Models::Graders::TextSimilarityGrader
      # @!attribute pass_threshold
      #   The threshold for the score.
      #
      #   @return [Float]
      required :pass_threshold, Float

      # @!method initialize(pass_threshold:)
      #   A TextSimilarityGrader object which grades text based on similarity metrics.
      #
      #   @param pass_threshold [Float] The threshold for the score.
    end

    # PythonGrader extended with an optional pass threshold.
    class EvalGraderPython < OpenAI::Models::Graders::PythonGrader
      # @!attribute pass_threshold
      #   The threshold for the score.
      #
      #   @return [Float, nil]
      optional :pass_threshold, Float

      # @!method initialize(pass_threshold: nil)
      #   A PythonGrader object that runs a python script on the input.
      #
      #   @param pass_threshold [Float] The threshold for the score.
    end

    # ScoreModelGrader extended with an optional pass threshold.
    class EvalGraderScoreModel < OpenAI::Models::Graders::ScoreModelGrader
      # @!attribute pass_threshold
      #   The threshold for the score.
      #
      #   @return [Float, nil]
      optional :pass_threshold, Float

      # @!method initialize(pass_threshold: nil)
      #   A ScoreModelGrader object that uses a model to assign a score to the input.
      #
      #   @param pass_threshold [Float] The threshold for the score.
    end

    # @!method self.variants
    #   @return [Array(OpenAI::Models::Graders::LabelModelGrader, OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderTextSimilarity, OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderPython, OpenAI::Models::EvalUpdateResponse::TestingCriterion::EvalGraderScoreModel)]
  end
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Evals
-
1
class CreateEvalCompletionsRunDataSource < OpenAI::Internal::Type::BaseModel
-
# @!attribute source
-
# Determines what populates the `item` namespace in this run's data source.
-
#
-
# @return [OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::Source::FileContent, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::Source::FileID, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::Source::StoredCompletions]
-
1
required :source, union: -> { OpenAI::Evals::CreateEvalCompletionsRunDataSource::Source }
-
-
# @!attribute type
-
# The type of run data source. Always `completions`.
-
#
-
# @return [Symbol, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::Type]
-
1
required :type, enum: -> { OpenAI::Evals::CreateEvalCompletionsRunDataSource::Type }
-
-
# @!attribute input_messages
-
# Used when sampling from a model. Dictates the structure of the messages passed
-
# into the model. Can either be a reference to a prebuilt trajectory (ie,
-
# `item.input_trajectory`), or a template with variable references to the `item`
-
# namespace.
-
#
-
# @return [OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::ItemReference, nil]
-
1
optional :input_messages,
-
union: -> {
-
OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages
-
}
-
-
# @!attribute model
-
# The name of the model to use for generating completions (e.g. "o3-mini").
-
#
-
# @return [String, nil]
-
1
optional :model, String
-
-
# @!attribute sampling_params
-
#
-
# @return [OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::SamplingParams, nil]
-
1
optional :sampling_params, -> { OpenAI::Evals::CreateEvalCompletionsRunDataSource::SamplingParams }
-
-
# @!method initialize(source:, type:, input_messages: nil, model: nil, sampling_params: nil)
-
# Some parameter documentation has been truncated; see
-
# {OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource} for more details.
-
#
-
# A CompletionsRunDataSource object describing a model sampling configuration.
-
#
-
# @param source [OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::Source::FileContent, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::Source::FileID, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::Source::StoredCompletions] Determines what populates the `item` namespace in this run's data source.
-
#
-
# @param type [Symbol, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::Type] The type of run data source. Always `completions`.
-
#
-
# @param input_messages [OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::ItemReference] Used when sampling from a model. Dictates the structure of the messages passed i
-
#
-
# @param model [String] The name of the model to use for generating completions (e.g. "o3-mini").
-
#
-
# @param sampling_params [OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::SamplingParams]
-
-
# Determines what populates the `item` namespace in this run's data source.
#
# @see OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource#source
module Source
  extend OpenAI::Internal::Type::Union

  # Variants are dispatched on the `type` field of the incoming payload.
  discriminator :type

  variant :file_content, -> { OpenAI::Evals::CreateEvalCompletionsRunDataSource::Source::FileContent }

  variant :file_id, -> { OpenAI::Evals::CreateEvalCompletionsRunDataSource::Source::FileID }

  # A StoredCompletionsRunDataSource configuration describing a set of filters
  variant :stored_completions,
          -> { OpenAI::Evals::CreateEvalCompletionsRunDataSource::Source::StoredCompletions }

  # Inline JSONL content supplied directly in the request body.
  class FileContent < OpenAI::Internal::Type::BaseModel
    # @!attribute content
    # The content of the jsonl file.
    #
    # @return [Array<OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::Source::FileContent::Content>]
    required :content,
             -> {
               OpenAI::Internal::Type::ArrayOf[OpenAI::Evals::CreateEvalCompletionsRunDataSource::Source::FileContent::Content]
             }

    # @!attribute type
    # The type of jsonl source. Always `file_content`.
    #
    # @return [Symbol, :file_content]
    required :type, const: :file_content

    # @!method initialize(content:, type: :file_content)
    # @param content [Array<OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::Source::FileContent::Content>] The content of the jsonl file.
    #
    # @param type [Symbol, :file_content] The type of jsonl source. Always `file_content`.

    # One JSONL record: an `item` hash plus an optional `sample` hash.
    class Content < OpenAI::Internal::Type::BaseModel
      # @!attribute item
      #
      # @return [Hash{Symbol=>Object}]
      required :item, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]

      # @!attribute sample
      #
      # @return [Hash{Symbol=>Object}, nil]
      optional :sample, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]

      # @!method initialize(item:, sample: nil)
      # @param item [Hash{Symbol=>Object}]
      # @param sample [Hash{Symbol=>Object}]
    end
  end

  # A reference to an already-uploaded JSONL file by its file id.
  class FileID < OpenAI::Internal::Type::BaseModel
    # @!attribute id
    # The identifier of the file.
    #
    # @return [String]
    required :id, String

    # @!attribute type
    # The type of jsonl source. Always `file_id`.
    #
    # @return [Symbol, :file_id]
    required :type, const: :file_id

    # @!method initialize(id:, type: :file_id)
    # @param id [String] The identifier of the file.
    #
    # @param type [Symbol, :file_id] The type of jsonl source. Always `file_id`.
  end

  # Filters selecting a set of previously stored completions as the data source.
  class StoredCompletions < OpenAI::Internal::Type::BaseModel
    # @!attribute type
    # The type of source. Always `stored_completions`.
    #
    # @return [Symbol, :stored_completions]
    required :type, const: :stored_completions

    # @!attribute created_after
    # An optional Unix timestamp to filter items created after this time.
    #
    # @return [Integer, nil]
    optional :created_after, Integer, nil?: true

    # @!attribute created_before
    # An optional Unix timestamp to filter items created before this time.
    #
    # @return [Integer, nil]
    optional :created_before, Integer, nil?: true

    # @!attribute limit
    # An optional maximum number of items to return.
    #
    # @return [Integer, nil]
    optional :limit, Integer, nil?: true

    # @!attribute metadata
    # Set of 16 key-value pairs that can be attached to an object. This can be useful
    # for storing additional information about the object in a structured format, and
    # querying for objects via API or the dashboard.
    #
    # Keys are strings with a maximum length of 64 characters. Values are strings with
    # a maximum length of 512 characters.
    #
    # @return [Hash{Symbol=>String}, nil]
    optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true

    # @!attribute model
    # An optional model to filter by (e.g., 'gpt-4o').
    #
    # @return [String, nil]
    optional :model, String, nil?: true

    # @!method initialize(created_after: nil, created_before: nil, limit: nil, metadata: nil, model: nil, type: :stored_completions)
    # Some parameter documentation has been truncated, see
    # {OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::Source::StoredCompletions}
    # for more details.
    #
    # A StoredCompletionsRunDataSource configuration describing a set of filters
    #
    # @param created_after [Integer, nil] An optional Unix timestamp to filter items created after this time.
    #
    # @param created_before [Integer, nil] An optional Unix timestamp to filter items created before this time.
    #
    # @param limit [Integer, nil] An optional maximum number of items to return.
    #
    # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
    #
    # @param model [String, nil] An optional model to filter by (e.g., 'gpt-4o').
    #
    # @param type [Symbol, :stored_completions] The type of source. Always `stored_completions`.
  end

  # @!method self.variants
  # @return [Array(OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::Source::FileContent, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::Source::FileID, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::Source::StoredCompletions)]
end
-
-
# The type of run data source. Always `completions`.
#
# @see OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource#type
module Type
  extend OpenAI::Internal::Type::Enum

  # The only admissible value for this data source's `type` field.
  COMPLETIONS = :completions

  # @!method self.values
  # @return [Array<Symbol>]
end
-
-
# Used when sampling from a model. Dictates the structure of the messages passed
# into the model. Can either be a reference to a prebuilt trajectory (ie,
# `item.input_trajectory`), or a template with variable references to the `item`
# namespace.
#
# @see OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource#input_messages
module InputMessages
  extend OpenAI::Internal::Type::Union

  # Variants are dispatched on the `type` field of the incoming payload.
  discriminator :type

  variant :template, -> { OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template }

  variant :item_reference,
          -> { OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::ItemReference }

  # Input messages expressed as an inline list of (possibly templated) chat messages.
  class Template < OpenAI::Internal::Type::BaseModel
    # @!attribute template
    # A list of chat messages forming the prompt or context. May include variable
    # references to the `item` namespace, ie {{item.name}}.
    #
    # @return [Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::Message>]
    required :template,
             -> {
               OpenAI::Internal::Type::ArrayOf[union: OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template]
             }

    # @!attribute type
    # The type of input messages. Always `template`.
    #
    # @return [Symbol, :template]
    required :type, const: :template

    # @!method initialize(template:, type: :template)
    # Some parameter documentation has been truncated, see
    # {OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template}
    # for more details.
    #
    # @param template [Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::Message>] A list of chat messages forming the prompt or context. May include variable refe
    #
    # @param type [Symbol, :template] The type of input messages. Always `template`.

    # A message input to the model with a role indicating instruction following
    # hierarchy. Instructions given with the `developer` or `system` role take
    # precedence over instructions given with the `user` role. Messages with the
    # `assistant` role are presumed to have been generated by the model in previous
    # interactions.
    module Template
      extend OpenAI::Internal::Type::Union

      discriminator :type

      # A message input to the model with a role indicating instruction following
      # hierarchy. Instructions given with the `developer` or `system` role take
      # precedence over instructions given with the `user` role. Messages with the
      # `assistant` role are presumed to have been generated by the model in previous
      # interactions.
      variant :message, -> { OpenAI::Responses::EasyInputMessage }

      # A message input to the model with a role indicating instruction following
      # hierarchy. Instructions given with the `developer` or `system` role take
      # precedence over instructions given with the `user` role. Messages with the
      # `assistant` role are presumed to have been generated by the model in previous
      # interactions.
      #
      # NOTE(review): this registers a second variant under the same `:message`
      # discriminator key as the `EasyInputMessage` variant above — presumably the
      # union resolves ambiguous payloads by shape or by last registration; confirm
      # against OpenAI::Internal::Type::Union's discriminator semantics.
      variant :message,
              -> {
                OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::Message
              }

      # Eval-specific message shape: content may be a template string, text,
      # output text, an image, or a heterogeneous array of inputs.
      class Message < OpenAI::Internal::Type::BaseModel
        # @!attribute content
        # Inputs to the model - can contain template strings.
        #
        # @return [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::Message::Content::OutputText, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::Message::Content::InputImage, Array<Object>]
        required :content,
                 union: -> {
                   OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::Message::Content
                 }

        # @!attribute role
        # The role of the message input. One of `user`, `assistant`, `system`, or
        # `developer`.
        #
        # @return [Symbol, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::Message::Role]
        required :role,
                 enum: -> {
                   OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::Message::Role
                 }

        # @!attribute type
        # The type of the message input. Always `message`.
        #
        # @return [Symbol, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::Message::Type, nil]
        optional :type,
                 enum: -> {
                   OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::Message::Type
                 }

        # @!method initialize(content:, role:, type: nil)
        # Some parameter documentation has been truncated, see
        # {OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::Message}
        # for more details.
        #
        # A message input to the model with a role indicating instruction following
        # hierarchy. Instructions given with the `developer` or `system` role take
        # precedence over instructions given with the `user` role. Messages with the
        # `assistant` role are presumed to have been generated by the model in previous
        # interactions.
        #
        # @param content [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::Message::Content::OutputText, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::Message::Content::InputImage, Array<Object>] Inputs to the model - can contain template strings.
        #
        # @param role [Symbol, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::Message::Role] The role of the message input. One of `user`, `assistant`, `system`, or
        #
        # @param type [Symbol, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::Message::Type] The type of the message input. Always `message`.

        # Inputs to the model - can contain template strings.
        #
        # @see OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::Message#content
        module Content
          extend OpenAI::Internal::Type::Union

          # A text input to the model.
          variant String

          # A text input to the model.
          variant -> { OpenAI::Responses::ResponseInputText }

          # A text output from the model.
          variant -> {
            OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::Message::Content::OutputText
          }

          # An image input to the model.
          variant -> {
            OpenAI::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::Message::Content::InputImage
          }

          # A list of inputs, each of which may be either an input text or input image object.
          variant -> { OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::Message::Content::AnArrayOfInputTextAndInputImageArray }

          # A text output previously produced by the model, fed back in as context.
          class OutputText < OpenAI::Internal::Type::BaseModel
            # @!attribute text
            # The text output from the model.
            #
            # @return [String]
            required :text, String

            # @!attribute type
            # The type of the output text. Always `output_text`.
            #
            # @return [Symbol, :output_text]
            required :type, const: :output_text

            # @!method initialize(text:, type: :output_text)
            # Some parameter documentation has been truncated, see
            # {OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::Message::Content::OutputText}
            # for more details.
            #
            # A text output from the model.
            #
            # @param text [String] The text output from the model.
            #
            # @param type [Symbol, :output_text] The type of the output text. Always `output_text`.
          end

          # An image input identified by URL, with an optional detail level.
          class InputImage < OpenAI::Internal::Type::BaseModel
            # @!attribute image_url
            # The URL of the image input.
            #
            # @return [String]
            required :image_url, String

            # @!attribute type
            # The type of the image input. Always `input_image`.
            #
            # @return [Symbol, :input_image]
            required :type, const: :input_image

            # @!attribute detail
            # The detail level of the image to be sent to the model. One of `high`, `low`, or
            # `auto`. Defaults to `auto`.
            #
            # @return [String, nil]
            optional :detail, String

            # @!method initialize(image_url:, detail: nil, type: :input_image)
            # Some parameter documentation has been truncated, see
            # {OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::Message::Content::InputImage}
            # for more details.
            #
            # An image input to the model.
            #
            # @param image_url [String] The URL of the image input.
            #
            # @param detail [String] The detail level of the image to be sent to the model. One of `high`, `low`, or
            #
            # @param type [Symbol, :input_image] The type of the image input. Always `input_image`.
          end

          # @!method self.variants
          # @return [Array(String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::Message::Content::OutputText, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::Message::Content::InputImage, Array<Object>)]

          # Untyped array variant: elements are left unvalidated (`Unknown`).
          # @type [OpenAI::Internal::Type::Converter]
          AnArrayOfInputTextAndInputImageArray = OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::Unknown]
        end

        # The role of the message input. One of `user`, `assistant`, `system`, or
        # `developer`.
        #
        # @see OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::Message#role
        module Role
          extend OpenAI::Internal::Type::Enum

          USER = :user
          ASSISTANT = :assistant
          SYSTEM = :system
          DEVELOPER = :developer

          # @!method self.values
          # @return [Array<Symbol>]
        end

        # The type of the message input. Always `message`.
        #
        # @see OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::Message#type
        module Type
          extend OpenAI::Internal::Type::Enum

          MESSAGE = :message

          # @!method self.values
          # @return [Array<Symbol>]
        end
      end

      # @!method self.variants
      # @return [Array(OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template::Template::Message)]
    end
  end

  # Input messages expressed as a reference to a variable in the `item` namespace.
  class ItemReference < OpenAI::Internal::Type::BaseModel
    # @!attribute item_reference
    # A reference to a variable in the `item` namespace. Ie, "item.input_trajectory"
    #
    # @return [String]
    required :item_reference, String

    # @!attribute type
    # The type of input messages. Always `item_reference`.
    #
    # @return [Symbol, :item_reference]
    required :type, const: :item_reference

    # @!method initialize(item_reference:, type: :item_reference)
    # @param item_reference [String] A reference to a variable in the `item` namespace. Ie, "item.input_trajectory"
    #
    # @param type [Symbol, :item_reference] The type of input messages. Always `item_reference`.
  end

  # @!method self.variants
  # @return [Array(OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::Template, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::InputMessages::ItemReference)]
end
-
-
# Sampling configuration applied when generating completions for the run.
#
# @see OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource#sampling_params
class SamplingParams < OpenAI::Internal::Type::BaseModel
  # @!attribute max_completion_tokens
  # The maximum number of tokens in the generated output.
  #
  # @return [Integer, nil]
  optional :max_completion_tokens, Integer

  # @!attribute response_format
  # An object specifying the format that the model must output.
  #
  # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
  # Outputs which ensures the model will match your supplied JSON schema. Learn more
  # in the
  # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
  #
  # Setting to `{ "type": "json_object" }` enables the older JSON mode, which
  # ensures the message the model generates is valid JSON. Using `json_schema` is
  # preferred for models that support it.
  #
  # @return [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::Models::ResponseFormatJSONObject, nil]
  optional :response_format,
           union: -> {
             OpenAI::Evals::CreateEvalCompletionsRunDataSource::SamplingParams::ResponseFormat
           }

  # @!attribute seed
  # A seed value to initialize the randomness, during sampling.
  #
  # @return [Integer, nil]
  optional :seed, Integer

  # @!attribute temperature
  # A higher temperature increases randomness in the outputs.
  #
  # @return [Float, nil]
  optional :temperature, Float

  # @!attribute tools
  # A list of tools the model may call. Currently, only functions are supported as a
  # tool. Use this to provide a list of functions the model may generate JSON inputs
  # for. A max of 128 functions are supported.
  #
  # @return [Array<OpenAI::Models::Chat::ChatCompletionTool>, nil]
  optional :tools, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Chat::ChatCompletionTool] }

  # @!attribute top_p
  # An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
  #
  # @return [Float, nil]
  optional :top_p, Float

  # @!method initialize(max_completion_tokens: nil, response_format: nil, seed: nil, temperature: nil, tools: nil, top_p: nil)
  # Some parameter documentation has been truncated, see
  # {OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::SamplingParams} for
  # more details.
  #
  # @param max_completion_tokens [Integer] The maximum number of tokens in the generated output.
  #
  # @param response_format [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output.
  #
  # @param seed [Integer] A seed value to initialize the randomness, during sampling.
  #
  # @param temperature [Float] A higher temperature increases randomness in the outputs.
  #
  # @param tools [Array<OpenAI::Models::Chat::ChatCompletionTool>] A list of tools the model may call. Currently, only functions are supported as a
  #
  # @param top_p [Float] An alternative to temperature for nucleus sampling; 1.0 includes all tokens.

  # An object specifying the format that the model must output.
  #
  # Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
  # Outputs which ensures the model will match your supplied JSON schema. Learn more
  # in the
  # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
  #
  # Setting to `{ "type": "json_object" }` enables the older JSON mode, which
  # ensures the message the model generates is valid JSON. Using `json_schema` is
  # preferred for models that support it.
  #
  # @see OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource::SamplingParams#response_format
  module ResponseFormat
    extend OpenAI::Internal::Type::Union

    # Default response format. Used to generate text responses.
    variant -> { OpenAI::ResponseFormatText }

    # JSON Schema response format. Used to generate structured JSON responses.
    # Learn more about [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs).
    variant -> { OpenAI::ResponseFormatJSONSchema }

    # JSON object response format. An older method of generating JSON responses.
    # Using `json_schema` is recommended for models that support it. Note that the
    # model will not generate JSON without a system or user message instructing it
    # to do so.
    variant -> { OpenAI::ResponseFormatJSONObject }

    # @!method self.variants
    # @return [Array(OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::Models::ResponseFormatJSONObject)]
  end
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Evals
-
1
# A JsonlRunDataSource object that specifies a JSONL file matching the eval.
class CreateEvalJSONLRunDataSource < OpenAI::Internal::Type::BaseModel
  # @!attribute source
  # Determines what populates the `item` namespace in the data source.
  #
  # @return [OpenAI::Models::Evals::CreateEvalJSONLRunDataSource::Source::FileContent, OpenAI::Models::Evals::CreateEvalJSONLRunDataSource::Source::FileID]
  required :source, union: -> { OpenAI::Evals::CreateEvalJSONLRunDataSource::Source }

  # @!attribute type
  # The type of data source. Always `jsonl`.
  #
  # @return [Symbol, :jsonl]
  required :type, const: :jsonl

  # @!method initialize(source:, type: :jsonl)
  # A JsonlRunDataSource object with that specifies a JSONL file that matches the
  # eval
  #
  # @param source [OpenAI::Models::Evals::CreateEvalJSONLRunDataSource::Source::FileContent, OpenAI::Models::Evals::CreateEvalJSONLRunDataSource::Source::FileID] Determines what populates the `item` namespace in the data source.
  #
  # @param type [Symbol, :jsonl] The type of data source. Always `jsonl`.

  # Determines what populates the `item` namespace in the data source.
  #
  # @see OpenAI::Models::Evals::CreateEvalJSONLRunDataSource#source
  module Source
    extend OpenAI::Internal::Type::Union

    # Variants are dispatched on the `type` field of the incoming payload.
    discriminator :type

    variant :file_content, -> { OpenAI::Evals::CreateEvalJSONLRunDataSource::Source::FileContent }

    variant :file_id, -> { OpenAI::Evals::CreateEvalJSONLRunDataSource::Source::FileID }

    # Inline JSONL content supplied directly in the request body.
    class FileContent < OpenAI::Internal::Type::BaseModel
      # @!attribute content
      # The content of the jsonl file.
      #
      # @return [Array<OpenAI::Models::Evals::CreateEvalJSONLRunDataSource::Source::FileContent::Content>]
      required :content,
               -> {
                 OpenAI::Internal::Type::ArrayOf[OpenAI::Evals::CreateEvalJSONLRunDataSource::Source::FileContent::Content]
               }

      # @!attribute type
      # The type of jsonl source. Always `file_content`.
      #
      # @return [Symbol, :file_content]
      required :type, const: :file_content

      # @!method initialize(content:, type: :file_content)
      # @param content [Array<OpenAI::Models::Evals::CreateEvalJSONLRunDataSource::Source::FileContent::Content>] The content of the jsonl file.
      #
      # @param type [Symbol, :file_content] The type of jsonl source. Always `file_content`.

      # One JSONL record: an `item` hash plus an optional `sample` hash.
      class Content < OpenAI::Internal::Type::BaseModel
        # @!attribute item
        #
        # @return [Hash{Symbol=>Object}]
        required :item, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]

        # @!attribute sample
        #
        # @return [Hash{Symbol=>Object}, nil]
        optional :sample, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]

        # @!method initialize(item:, sample: nil)
        # @param item [Hash{Symbol=>Object}]
        # @param sample [Hash{Symbol=>Object}]
      end
    end

    # A reference to an already-uploaded JSONL file by its file id.
    class FileID < OpenAI::Internal::Type::BaseModel
      # @!attribute id
      # The identifier of the file.
      #
      # @return [String]
      required :id, String

      # @!attribute type
      # The type of jsonl source. Always `file_id`.
      #
      # @return [Symbol, :file_id]
      required :type, const: :file_id

      # @!method initialize(id:, type: :file_id)
      # @param id [String] The identifier of the file.
      #
      # @param type [Symbol, :file_id] The type of jsonl source. Always `file_id`.
    end

    # @!method self.variants
    # @return [Array(OpenAI::Models::Evals::CreateEvalJSONLRunDataSource::Source::FileContent, OpenAI::Models::Evals::CreateEvalJSONLRunDataSource::Source::FileID)]
  end
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Evals
-
1
# An object representing an error response from the Eval API.
class EvalAPIError < OpenAI::Internal::Type::BaseModel
  # @!attribute code
  # The error code.
  #
  # @return [String]
  required :code, String

  # @!attribute message
  # The error message.
  #
  # @return [String]
  required :message, String

  # @!method initialize(code:, message:)
  # An object representing an error response from the Eval API.
  #
  # @param code [String] The error code.
  #
  # @param message [String] The error message.
end
-
end
-
-
1
EvalAPIError = Evals::EvalAPIError
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Evals
-
# Request parameters for cancelling an eval run.
#
# @see OpenAI::Resources::Evals::Runs#cancel
class RunCancelParams < OpenAI::Internal::Type::BaseModel
  extend OpenAI::Internal::Type::RequestParameters::Converter
  include OpenAI::Internal::Type::RequestParameters

  # @!attribute eval_id
  # Identifier of the evaluation whose run is being cancelled.
  #
  # @return [String]
  required :eval_id, String

  # @!method initialize(eval_id:, request_options: {})
  # @param eval_id [String]
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Evals
-
# @see OpenAI::Resources::Evals::Runs#cancel
-
1
class RunCancelResponse < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# Unique identifier for the evaluation run.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute created_at
-
# Unix timestamp (in seconds) when the evaluation run was created.
-
#
-
# @return [Integer]
-
1
required :created_at, Integer
-
-
# @!attribute data_source
-
# Information about the run's data source.
-
#
-
# @return [OpenAI::Models::Evals::CreateEvalJSONLRunDataSource, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses]
-
1
required :data_source, union: -> { OpenAI::Models::Evals::RunCancelResponse::DataSource }
-
-
# @!attribute error
-
# An object representing an error response from the Eval API.
-
#
-
# @return [OpenAI::Models::Evals::EvalAPIError]
-
1
required :error, -> { OpenAI::Evals::EvalAPIError }
-
-
# @!attribute eval_id
-
# The identifier of the associated evaluation.
-
#
-
# @return [String]
-
1
required :eval_id, String
-
-
# @!attribute metadata
-
# Set of 16 key-value pairs that can be attached to an object. This can be useful
-
# for storing additional information about the object in a structured format, and
-
# querying for objects via API or the dashboard.
-
#
-
# Keys are strings with a maximum length of 64 characters. Values are strings with
-
# a maximum length of 512 characters.
-
#
-
# @return [Hash{Symbol=>String}, nil]
-
1
required :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
-
-
# @!attribute model
-
# The model that is evaluated, if applicable.
-
#
-
# @return [String]
-
1
required :model, String
-
-
# @!attribute name
-
# The name of the evaluation run.
-
#
-
# @return [String]
-
1
required :name, String
-
-
# @!attribute object
-
# The type of the object. Always "eval.run".
-
#
-
# @return [Symbol, :"eval.run"]
-
1
required :object, const: :"eval.run"
-
-
# @!attribute per_model_usage
-
# Usage statistics for each model during the evaluation run.
-
#
-
# @return [Array<OpenAI::Models::Evals::RunCancelResponse::PerModelUsage>]
-
1
required :per_model_usage,
-
-> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::RunCancelResponse::PerModelUsage] }
-
-
# @!attribute per_testing_criteria_results
-
# Results per testing criteria applied during the evaluation run.
-
#
-
# @return [Array<OpenAI::Models::Evals::RunCancelResponse::PerTestingCriteriaResult>]
-
1
required :per_testing_criteria_results,
-
-> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::RunCancelResponse::PerTestingCriteriaResult] }
-
-
# @!attribute report_url
-
# The URL to the rendered evaluation run report on the UI dashboard.
-
#
-
# @return [String]
-
1
required :report_url, String
-
-
# @!attribute result_counts
-
# Counters summarizing the outcomes of the evaluation run.
-
#
-
# @return [OpenAI::Models::Evals::RunCancelResponse::ResultCounts]
-
1
required :result_counts, -> { OpenAI::Models::Evals::RunCancelResponse::ResultCounts }
-
-
# @!attribute status
-
# The status of the evaluation run.
-
#
-
# @return [String]
-
1
required :status, String
-
-
# @!method initialize(id:, created_at:, data_source:, error:, eval_id:, metadata:, model:, name:, per_model_usage:, per_testing_criteria_results:, report_url:, result_counts:, status:, object: :"eval.run")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Evals::RunCancelResponse} for more details.
-
#
-
# A schema representing an evaluation run.
-
#
-
# @param id [String] Unique identifier for the evaluation run.
-
#
-
# @param created_at [Integer] Unix timestamp (in seconds) when the evaluation run was created.
-
#
-
# @param data_source [OpenAI::Models::Evals::CreateEvalJSONLRunDataSource, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses] Information about the run's data source.
-
#
-
# @param error [OpenAI::Models::Evals::EvalAPIError] An object representing an error response from the Eval API.
-
#
-
# @param eval_id [String] The identifier of the associated evaluation.
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
#
-
# @param model [String] The model that is evaluated, if applicable.
-
#
-
# @param name [String] The name of the evaluation run.
-
#
-
# @param per_model_usage [Array<OpenAI::Models::Evals::RunCancelResponse::PerModelUsage>] Usage statistics for each model during the evaluation run.
-
#
-
# @param per_testing_criteria_results [Array<OpenAI::Models::Evals::RunCancelResponse::PerTestingCriteriaResult>] Results per testing criteria applied during the evaluation run.
-
#
-
# @param report_url [String] The URL to the rendered evaluation run report on the UI dashboard.
-
#
-
# @param result_counts [OpenAI::Models::Evals::RunCancelResponse::ResultCounts] Counters summarizing the outcomes of the evaluation run.
-
#
-
# @param status [String] The status of the evaluation run.
-
#
-
# @param object [Symbol, :"eval.run"] The type of the object. Always "eval.run".
-
-
# Information about the run's data source.
-
#
-
# @see OpenAI::Models::Evals::RunCancelResponse#data_source
-
1
module DataSource
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
# A JsonlRunDataSource object with that specifies a JSONL file that matches the eval
-
1
variant :jsonl, -> { OpenAI::Evals::CreateEvalJSONLRunDataSource }
-
-
# A CompletionsRunDataSource object describing a model sampling configuration.
-
1
variant :completions, -> { OpenAI::Evals::CreateEvalCompletionsRunDataSource }
-
-
# A ResponsesRunDataSource object describing a model sampling configuration.
-
1
variant :responses, -> { OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses }
-
-
1
class Responses < OpenAI::Internal::Type::BaseModel
-
# @!attribute source
-
# Determines what populates the `item` namespace in this run's data source.
-
#
-
# @return [OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::FileContent, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::FileID, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::Responses]
-
1
required :source, union: -> { OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source }
-
-
# @!attribute type
-
# The type of run data source. Always `responses`.
-
#
-
# @return [Symbol, :responses]
-
1
required :type, const: :responses
-
-
# @!attribute input_messages
-
# Used when sampling from a model. Dictates the structure of the messages passed
-
# into the model. Can either be a reference to a prebuilt trajectory (ie,
-
# `item.input_trajectory`), or a template with variable references to the `item`
-
# namespace.
-
#
-
# @return [OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::ItemReference, nil]
-
1
optional :input_messages,
-
union: -> { OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages }
-
-
# @!attribute model
-
# The name of the model to use for generating completions (e.g. "o3-mini").
-
#
-
# @return [String, nil]
-
1
optional :model, String
-
-
# @!attribute sampling_params
-
#
-
# @return [OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams, nil]
-
1
optional :sampling_params,
-
-> { OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams }
-
-
# @!method initialize(source:, input_messages: nil, model: nil, sampling_params: nil, type: :responses)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses} for more
-
# details.
-
#
-
# A ResponsesRunDataSource object describing a model sampling configuration.
-
#
-
# @param source [OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::FileContent, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::FileID, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::Responses] Determines what populates the `item` namespace in this run's data source.
-
#
-
# @param input_messages [OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::ItemReference] Used when sampling from a model. Dictates the structure of the messages passed i
-
#
-
# @param model [String] The name of the model to use for generating completions (e.g. "o3-mini").
-
#
-
# @param sampling_params [OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams]
-
#
-
# @param type [Symbol, :responses] The type of run data source. Always `responses`.
-
-
# Determines what populates the `item` namespace in this run's data source.
-
#
-
# @see OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses#source
-
1
module Source
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
1
variant :file_content,
-
-> { OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::FileContent }
-
-
1
variant :file_id, -> { OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::FileID }
-
-
# A EvalResponsesSource object describing a run data source configuration.
-
1
variant :responses,
-
-> { OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::Responses }
-
-
1
class FileContent < OpenAI::Internal::Type::BaseModel
-
# @!attribute content
-
# The content of the jsonl file.
-
#
-
# @return [Array<OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::FileContent::Content>]
-
1
required :content,
-
-> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::FileContent::Content] }
-
-
# @!attribute type
-
# The type of jsonl source. Always `file_content`.
-
#
-
# @return [Symbol, :file_content]
-
1
required :type, const: :file_content
-
-
# @!method initialize(content:, type: :file_content)
-
# @param content [Array<OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::FileContent::Content>] The content of the jsonl file.
-
#
-
# @param type [Symbol, :file_content] The type of jsonl source. Always `file_content`.
-
-
1
class Content < OpenAI::Internal::Type::BaseModel
-
# @!attribute item
-
#
-
# @return [Hash{Symbol=>Object}]
-
1
required :item, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]
-
-
# @!attribute sample
-
#
-
# @return [Hash{Symbol=>Object}, nil]
-
1
optional :sample, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]
-
-
# @!method initialize(item:, sample: nil)
-
# @param item [Hash{Symbol=>Object}]
-
# @param sample [Hash{Symbol=>Object}]
-
end
-
end
-
-
1
class FileID < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The identifier of the file.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute type
-
# The type of jsonl source. Always `file_id`.
-
#
-
# @return [Symbol, :file_id]
-
1
required :type, const: :file_id
-
-
# @!method initialize(id:, type: :file_id)
-
# @param id [String] The identifier of the file.
-
#
-
# @param type [Symbol, :file_id] The type of jsonl source. Always `file_id`.
-
end
-
-
1
class Responses < OpenAI::Internal::Type::BaseModel
-
# @!attribute type
-
# The type of run data source. Always `responses`.
-
#
-
# @return [Symbol, :responses]
-
1
required :type, const: :responses
-
-
# @!attribute created_after
-
# Only include items created after this timestamp (inclusive). This is a query
-
# parameter used to select responses.
-
#
-
# @return [Integer, nil]
-
1
optional :created_after, Integer, nil?: true
-
-
# @!attribute created_before
-
# Only include items created before this timestamp (inclusive). This is a query
-
# parameter used to select responses.
-
#
-
# @return [Integer, nil]
-
1
optional :created_before, Integer, nil?: true
-
-
# @!attribute instructions_search
-
# Optional string to search the 'instructions' field. This is a query parameter
-
# used to select responses.
-
#
-
# @return [String, nil]
-
1
optional :instructions_search, String, nil?: true
-
-
# @!attribute metadata
-
# Metadata filter for the responses. This is a query parameter used to select
-
# responses.
-
#
-
# @return [Object, nil]
-
1
optional :metadata, OpenAI::Internal::Type::Unknown, nil?: true
-
-
# @!attribute model
-
# The name of the model to find responses for. This is a query parameter used to
-
# select responses.
-
#
-
# @return [String, nil]
-
1
optional :model, String, nil?: true
-
-
# @!attribute reasoning_effort
-
# Optional reasoning effort parameter. This is a query parameter used to select
-
# responses.
-
#
-
# @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
-
1
optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
-
-
# @!attribute temperature
-
# Sampling temperature. This is a query parameter used to select responses.
-
#
-
# @return [Float, nil]
-
1
optional :temperature, Float, nil?: true
-
-
# @!attribute tools
-
# List of tool names. This is a query parameter used to select responses.
-
#
-
# @return [Array<String>, nil]
-
1
optional :tools, OpenAI::Internal::Type::ArrayOf[String], nil?: true
-
-
# @!attribute top_p
-
# Nucleus sampling parameter. This is a query parameter used to select responses.
-
#
-
# @return [Float, nil]
-
1
optional :top_p, Float, nil?: true
-
-
# @!attribute users
-
# List of user identifiers. This is a query parameter used to select responses.
-
#
-
# @return [Array<String>, nil]
-
1
optional :users, OpenAI::Internal::Type::ArrayOf[String], nil?: true
-
-
# @!method initialize(created_after: nil, created_before: nil, instructions_search: nil, metadata: nil, model: nil, reasoning_effort: nil, temperature: nil, tools: nil, top_p: nil, users: nil, type: :responses)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::Responses}
-
# for more details.
-
#
-
# A EvalResponsesSource object describing a run data source configuration.
-
#
-
# @param created_after [Integer, nil] Only include items created after this timestamp (inclusive). This is a query par
-
#
-
# @param created_before [Integer, nil] Only include items created before this timestamp (inclusive). This is a query pa
-
#
-
# @param instructions_search [String, nil] Optional string to search the 'instructions' field. This is a query parameter us
-
#
-
# @param metadata [Object, nil] Metadata filter for the responses. This is a query parameter used to select resp
-
#
-
# @param model [String, nil] The name of the model to find responses for. This is a query parameter used to s
-
#
-
# @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Optional reasoning effort parameter. This is a query parameter used to select re
-
#
-
# @param temperature [Float, nil] Sampling temperature. This is a query parameter used to select responses.
-
#
-
# @param tools [Array<String>, nil] List of tool names. This is a query parameter used to select responses.
-
#
-
# @param top_p [Float, nil] Nucleus sampling parameter. This is a query parameter used to select responses.
-
#
-
# @param users [Array<String>, nil] List of user identifiers. This is a query parameter used to select responses.
-
#
-
# @param type [Symbol, :responses] The type of run data source. Always `responses`.
-
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::FileContent, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::FileID, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::Source::Responses)]
-
end
-
-
# Used when sampling from a model. Dictates the structure of the messages passed
-
# into the model. Can either be a reference to a prebuilt trajectory (ie,
-
# `item.input_trajectory`), or a template with variable references to the `item`
-
# namespace.
-
#
-
# @see OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses#input_messages
-
1
module InputMessages
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
1
variant :template,
-
-> { OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template }
-
-
1
variant :item_reference,
-
-> { OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::ItemReference }
-
-
1
class Template < OpenAI::Internal::Type::BaseModel
-
# @!attribute template
-
# A list of chat messages forming the prompt or context. May include variable
-
# references to the `item` namespace, ie {{item.name}}.
-
#
-
# @return [Array<OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::ChatMessage, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem>]
-
1
required :template,
-
-> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template] }
-
-
# @!attribute type
-
# The type of input messages. Always `template`.
-
#
-
# @return [Symbol, :template]
-
1
required :type, const: :template
-
-
# @!method initialize(template:, type: :template)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template}
-
# for more details.
-
#
-
# @param template [Array<OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::ChatMessage, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem>] A list of chat messages forming the prompt or context. May include variable refe
-
#
-
# @param type [Symbol, :template] The type of input messages. Always `template`.
-
-
# A message input to the model with a role indicating instruction following
-
# hierarchy. Instructions given with the `developer` or `system` role take
-
# precedence over instructions given with the `user` role. Messages with the
-
# `assistant` role are presumed to have been generated by the model in previous
-
# interactions.
-
1
module Template
-
1
extend OpenAI::Internal::Type::Union
-
-
1
variant -> { OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::ChatMessage }
-
-
# A message input to the model with a role indicating instruction following
-
# hierarchy. Instructions given with the `developer` or `system` role take
-
# precedence over instructions given with the `user` role. Messages with the
-
# `assistant` role are presumed to have been generated by the model in previous
-
# interactions.
-
1
variant -> { OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem }
-
-
1
class ChatMessage < OpenAI::Internal::Type::BaseModel
-
# @!attribute content
-
# The content of the message.
-
#
-
# @return [String]
-
1
required :content, String
-
-
# @!attribute role
-
# The role of the message (e.g. "system", "assistant", "user").
-
#
-
# @return [String]
-
1
required :role, String
-
-
# @!method initialize(content:, role:)
-
# @param content [String] The content of the message.
-
#
-
# @param role [String] The role of the message (e.g. "system", "assistant", "user").
-
end
-
-
1
class EvalItem < OpenAI::Internal::Type::BaseModel
-
# @!attribute content
-
# Inputs to the model - can contain template strings.
-
#
-
# @return [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage, Array<Object>]
-
1
required :content,
-
union: -> { OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content }
-
-
# @!attribute role
-
# The role of the message input. One of `user`, `assistant`, `system`, or
-
# `developer`.
-
#
-
# @return [Symbol, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role]
-
1
required :role,
-
enum: -> { OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role }
-
-
# @!attribute type
-
# The type of the message input. Always `message`.
-
#
-
# @return [Symbol, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type, nil]
-
1
optional :type,
-
enum: -> { OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type }
-
-
# @!method initialize(content:, role:, type: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem}
-
# for more details.
-
#
-
# A message input to the model with a role indicating instruction following
-
# hierarchy. Instructions given with the `developer` or `system` role take
-
# precedence over instructions given with the `user` role. Messages with the
-
# `assistant` role are presumed to have been generated by the model in previous
-
# interactions.
-
#
-
# @param content [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage, Array<Object>] Inputs to the model - can contain template strings.
-
#
-
# @param role [Symbol, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role] The role of the message input. One of `user`, `assistant`, `system`, or
-
#
-
# @param type [Symbol, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type] The type of the message input. Always `message`.
-
-
# Inputs to the model - can contain template strings.
-
#
-
# @see OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem#content
-
1
module Content
-
1
extend OpenAI::Internal::Type::Union
-
-
# A text input to the model.
-
1
variant String
-
-
# A text input to the model.
-
1
variant -> { OpenAI::Responses::ResponseInputText }
-
-
# A text output from the model.
-
1
variant -> { OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText }
-
-
# An image input to the model.
-
1
variant -> { OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage }
-
-
# A list of inputs, each of which may be either an input text or input image object.
-
1
variant -> { OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::AnArrayOfInputTextAndInputImageArray }
-
-
1
class OutputText < OpenAI::Internal::Type::BaseModel
-
# @!attribute text
-
# The text output from the model.
-
#
-
# @return [String]
-
1
required :text, String
-
-
# @!attribute type
-
# The type of the output text. Always `output_text`.
-
#
-
# @return [Symbol, :output_text]
-
1
required :type, const: :output_text
-
-
# @!method initialize(text:, type: :output_text)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText}
-
# for more details.
-
#
-
# A text output from the model.
-
#
-
# @param text [String] The text output from the model.
-
#
-
# @param type [Symbol, :output_text] The type of the output text. Always `output_text`.
-
end
-
-
1
class InputImage < OpenAI::Internal::Type::BaseModel
-
# @!attribute image_url
-
# The URL of the image input.
-
#
-
# @return [String]
-
1
required :image_url, String
-
-
# @!attribute type
-
# The type of the image input. Always `input_image`.
-
#
-
# @return [Symbol, :input_image]
-
1
required :type, const: :input_image
-
-
# @!attribute detail
-
# The detail level of the image to be sent to the model. One of `high`, `low`, or
-
# `auto`. Defaults to `auto`.
-
#
-
# @return [String, nil]
-
1
optional :detail, String
-
-
# @!method initialize(image_url:, detail: nil, type: :input_image)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage}
-
# for more details.
-
#
-
# An image input to the model.
-
#
-
# @param image_url [String] The URL of the image input.
-
#
-
# @param detail [String] The detail level of the image to be sent to the model. One of `high`, `low`, or
-
#
-
# @param type [Symbol, :input_image] The type of the image input. Always `input_image`.
-
end
-
-
# @!method self.variants
-
# @return [Array(String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage, Array<Object>)]
-
-
# @type [OpenAI::Internal::Type::Converter]
-
1
AnArrayOfInputTextAndInputImageArray = OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::Unknown]
-
end
-
-
# The role of the message input. One of `user`, `assistant`, `system`, or
-
# `developer`.
-
#
-
# @see OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem#role
-
1
module Role
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
USER = :user
-
1
ASSISTANT = :assistant
-
1
SYSTEM = :system
-
1
DEVELOPER = :developer
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
# The type of the message input. Always `message`.
-
#
-
# @see OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem#type
-
1
module Type
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
MESSAGE = :message
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::ChatMessage, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem)]
-
end
-
end
-
-
1
class ItemReference < OpenAI::Internal::Type::BaseModel
-
# @!attribute item_reference
-
# A reference to a variable in the `item` namespace. Ie, "item.name"
-
#
-
# @return [String]
-
1
required :item_reference, String
-
-
# @!attribute type
-
# The type of input messages. Always `item_reference`.
-
#
-
# @return [Symbol, :item_reference]
-
1
required :type, const: :item_reference
-
-
# @!method initialize(item_reference:, type: :item_reference)
-
# @param item_reference [String] A reference to a variable in the `item` namespace. Ie, "item.name"
-
#
-
# @param type [Symbol, :item_reference] The type of input messages. Always `item_reference`.
-
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::Template, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::InputMessages::ItemReference)]
-
end
-
-
# @see OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses#sampling_params
-
1
class SamplingParams < OpenAI::Internal::Type::BaseModel
-
# @!attribute max_completion_tokens
-
# The maximum number of tokens in the generated output.
-
#
-
# @return [Integer, nil]
-
1
optional :max_completion_tokens, Integer
-
-
# @!attribute seed
-
# A seed value to initialize the randomness, during sampling.
-
#
-
# @return [Integer, nil]
-
1
optional :seed, Integer
-
-
# @!attribute temperature
-
# A higher temperature increases randomness in the outputs.
-
#
-
# @return [Float, nil]
-
1
optional :temperature, Float
-
-
# @!attribute text
-
# Configuration options for a text response from the model. Can be plain text or
-
# structured JSON data. Learn more:
-
#
-
# - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
-
# - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
-
#
-
# @return [OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::Text, nil]
-
1
optional :text,
-
-> { OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::Text }
-
-
# @!attribute tools
-
# An array of tools the model may call while generating a response. You can
-
# specify which tool to use by setting the `tool_choice` parameter.
-
#
-
# The two categories of tools you can provide the model are:
-
#
-
# - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
-
# capabilities, like
-
# [web search](https://platform.openai.com/docs/guides/tools-web-search) or
-
# [file search](https://platform.openai.com/docs/guides/tools-file-search).
-
# Learn more about
-
# [built-in tools](https://platform.openai.com/docs/guides/tools).
-
# - **Function calls (custom tools)**: Functions that are defined by you, enabling
-
# the model to call your own code. Learn more about
-
# [function calling](https://platform.openai.com/docs/guides/function-calling).
-
#
-
# @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>, nil]
-
1
optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] }
-
-
# @!attribute top_p
-
# An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
-
#
-
# @return [Float, nil]
-
1
optional :top_p, Float
-
-
# @!method initialize(max_completion_tokens: nil, seed: nil, temperature: nil, text: nil, tools: nil, top_p: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams}
-
# for more details.
-
#
-
# @param max_completion_tokens [Integer] The maximum number of tokens in the generated output.
-
#
-
# @param seed [Integer] A seed value to initialize the randomness, during sampling.
-
#
-
# @param temperature [Float] A higher temperature increases randomness in the outputs.
-
#
-
# @param text [OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::Text] Configuration options for a text response from the model. Can be plain
-
#
-
# @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
-
#
-
# @param top_p [Float] An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
-
-
# @see OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams#text
-
1
class Text < OpenAI::Internal::Type::BaseModel
-
# @!attribute format_
-
# An object specifying the format that the model must output.
-
#
-
# Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
-
# ensures the model will match your supplied JSON schema. Learn more in the
-
# [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
-
#
-
# The default format is `{ "type": "text" }` with no additional options.
-
#
-
# **Not recommended for gpt-4o and newer models:**
-
#
-
# Setting to `{ "type": "json_object" }` enables the older JSON mode, which
-
# ensures the message the model generates is valid JSON. Using `json_schema` is
-
# preferred for models that support it.
-
#
-
# @return [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject, nil]
-
1
optional :format_,
-
union: -> {
-
OpenAI::Responses::ResponseFormatTextConfig
-
},
-
api_name: :format
-
-
# @!method initialize(format_: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses::SamplingParams::Text}
-
# for more details.
-
#
-
# Configuration options for a text response from the model. Can be plain text or
-
# structured JSON data. Learn more:
-
#
-
# - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
-
# - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
-
#
-
# @param format_ [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output.
-
end
-
end
-
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Evals::CreateEvalJSONLRunDataSource, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource, OpenAI::Models::Evals::RunCancelResponse::DataSource::Responses)]
-
end
-
-
1
class PerModelUsage < OpenAI::Internal::Type::BaseModel
-
# @!attribute cached_tokens
-
# The number of tokens retrieved from cache.
-
#
-
# @return [Integer]
-
1
required :cached_tokens, Integer
-
-
# @!attribute completion_tokens
-
# The number of completion tokens generated.
-
#
-
# @return [Integer]
-
1
required :completion_tokens, Integer
-
-
# @!attribute invocation_count
-
# The number of invocations.
-
#
-
# @return [Integer]
-
1
required :invocation_count, Integer
-
-
# @!attribute model_name
-
# The name of the model.
-
#
-
# @return [String]
-
1
required :model_name, String
-
-
# @!attribute prompt_tokens
-
# The number of prompt tokens used.
-
#
-
# @return [Integer]
-
1
required :prompt_tokens, Integer
-
-
# @!attribute total_tokens
-
# The total number of tokens used.
-
#
-
# @return [Integer]
-
1
required :total_tokens, Integer
-
-
# @!method initialize(cached_tokens:, completion_tokens:, invocation_count:, model_name:, prompt_tokens:, total_tokens:)
-
# @param cached_tokens [Integer] The number of tokens retrieved from cache.
-
#
-
# @param completion_tokens [Integer] The number of completion tokens generated.
-
#
-
# @param invocation_count [Integer] The number of invocations.
-
#
-
# @param model_name [String] The name of the model.
-
#
-
# @param prompt_tokens [Integer] The number of prompt tokens used.
-
#
-
# @param total_tokens [Integer] The total number of tokens used.
-
end
-
-
1
class PerTestingCriteriaResult < OpenAI::Internal::Type::BaseModel
-
# @!attribute failed
-
# Number of tests failed for this criteria.
-
#
-
# @return [Integer]
-
1
required :failed, Integer
-
-
# @!attribute passed
-
# Number of tests passed for this criteria.
-
#
-
# @return [Integer]
-
1
required :passed, Integer
-
-
# @!attribute testing_criteria
-
# A description of the testing criteria.
-
#
-
# @return [String]
-
1
required :testing_criteria, String
-
-
# @!method initialize(failed:, passed:, testing_criteria:)
-
# @param failed [Integer] Number of tests failed for this criteria.
-
#
-
# @param passed [Integer] Number of tests passed for this criteria.
-
#
-
# @param testing_criteria [String] A description of the testing criteria.
-
end
-
-
# @see OpenAI::Models::Evals::RunCancelResponse#result_counts
-
1
class ResultCounts < OpenAI::Internal::Type::BaseModel
-
# @!attribute errored
-
# Number of output items that resulted in an error.
-
#
-
# @return [Integer]
-
1
required :errored, Integer
-
-
# @!attribute failed
-
# Number of output items that failed to pass the evaluation.
-
#
-
# @return [Integer]
-
1
required :failed, Integer
-
-
# @!attribute passed
-
# Number of output items that passed the evaluation.
-
#
-
# @return [Integer]
-
1
required :passed, Integer
-
-
# @!attribute total
-
# Total number of executed output items.
-
#
-
# @return [Integer]
-
1
required :total, Integer
-
-
# @!method initialize(errored:, failed:, passed:, total:)
-
# Counters summarizing the outcomes of the evaluation run.
-
#
-
# @param errored [Integer] Number of output items that resulted in an error.
-
#
-
# @param failed [Integer] Number of output items that failed to pass the evaluation.
-
#
-
# @param passed [Integer] Number of output items that passed the evaluation.
-
#
-
# @param total [Integer] Total number of executed output items.
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Evals
-
# @see OpenAI::Resources::Evals::Runs#create
-
1
class RunCreateParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!attribute data_source
-
# Details about the run's data source.
-
#
-
# @return [OpenAI::Models::Evals::CreateEvalJSONLRunDataSource, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource]
-
1
required :data_source, union: -> { OpenAI::Evals::RunCreateParams::DataSource }
-
-
# @!attribute metadata
-
# Set of 16 key-value pairs that can be attached to an object. This can be useful
-
# for storing additional information about the object in a structured format, and
-
# querying for objects via API or the dashboard.
-
#
-
# Keys are strings with a maximum length of 64 characters. Values are strings with
-
# a maximum length of 512 characters.
-
#
-
# @return [Hash{Symbol=>String}, nil]
-
1
optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
-
-
# @!attribute name
-
# The name of the run.
-
#
-
# @return [String, nil]
-
1
optional :name, String
-
-
# @!method initialize(data_source:, metadata: nil, name: nil, request_options: {})
-
# Some parameter documentation has been truncated, see
-
# {OpenAI::Models::Evals::RunCreateParams} for more details.
-
#
-
# @param data_source [OpenAI::Models::Evals::CreateEvalJSONLRunDataSource, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource] Details about the run's data source.
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
#
-
# @param name [String] The name of the run.
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
-
# Details about the run's data source.
-
1
module DataSource
-
1
extend OpenAI::Internal::Type::Union
-
-
# A JsonlRunDataSource object that specifies a JSONL file that matches the eval
-
1
variant -> { OpenAI::Evals::CreateEvalJSONLRunDataSource }
-
-
# A CompletionsRunDataSource object describing a model sampling configuration.
-
1
variant -> { OpenAI::Evals::CreateEvalCompletionsRunDataSource }
-
-
# A ResponsesRunDataSource object describing a model sampling configuration.
-
1
variant -> { OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource }
-
-
1
class CreateEvalResponsesRunDataSource < OpenAI::Internal::Type::BaseModel
-
# @!attribute source
-
# Determines what populates the `item` namespace in this run's data source.
-
#
-
# @return [OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::FileContent, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::FileID, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::Responses]
-
1
required :source,
-
union: -> {
-
OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source
-
}
-
-
# @!attribute type
-
# The type of run data source. Always `responses`.
-
#
-
# @return [Symbol, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Type]
-
1
required :type,
-
enum: -> {
-
OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Type
-
}
-
-
# @!attribute input_messages
-
# Used when sampling from a model. Dictates the structure of the messages passed
-
# into the model. Can either be a reference to a prebuilt trajectory (ie,
-
# `item.input_trajectory`), or a template with variable references to the `item`
-
# namespace.
-
#
-
# @return [OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::ItemReference, nil]
-
1
optional :input_messages,
-
union: -> {
-
OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages
-
}
-
-
# @!attribute model
-
# The name of the model to use for generating completions (e.g. "o3-mini").
-
#
-
# @return [String, nil]
-
1
optional :model, String
-
-
# @!attribute sampling_params
-
#
-
# @return [OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams, nil]
-
1
optional :sampling_params,
-
-> {
-
OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams
-
}
-
-
# @!method initialize(source:, type:, input_messages: nil, model: nil, sampling_params: nil)
-
# Some parameter documentation has been truncated, see
-
# {OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource}
-
# for more details.
-
#
-
# A ResponsesRunDataSource object describing a model sampling configuration.
-
#
-
# @param source [OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::FileContent, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::FileID, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::Responses] Determines what populates the `item` namespace in this run's data source.
-
#
-
# @param type [Symbol, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Type] The type of run data source. Always `responses`.
-
#
-
# @param input_messages [OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::ItemReference] Used when sampling from a model. Dictates the structure of the messages passed i
-
#
-
# @param model [String] The name of the model to use for generating completions (e.g. "o3-mini").
-
#
-
# @param sampling_params [OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams]
-
-
# Determines what populates the `item` namespace in this run's data source.
-
#
-
# @see OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource#source
-
1
module Source
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
1
variant :file_content,
-
-> {
-
OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::FileContent
-
}
-
-
1
variant :file_id,
-
-> {
-
OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::FileID
-
}
-
-
# An EvalResponsesSource object describing a run data source configuration.
-
1
variant :responses,
-
-> {
-
OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::Responses
-
}
-
-
1
class FileContent < OpenAI::Internal::Type::BaseModel
# Inline JSONL data source: rows are supplied directly in the request body
# rather than referenced by file ID (discriminated by `type: :file_content`).
-
# @!attribute content
-
# The content of the jsonl file.
-
#
-
# @return [Array<OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::FileContent::Content>]
-
1
required :content,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::FileContent::Content]
-
}
-
-
# @!attribute type
-
# The type of jsonl source. Always `file_content`.
-
#
-
# @return [Symbol, :file_content]
-
1
required :type, const: :file_content
-
-
# @!method initialize(content:, type: :file_content)
-
# @param content [Array<OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::FileContent::Content>] The content of the jsonl file.
-
#
-
# @param type [Symbol, :file_content] The type of jsonl source. Always `file_content`.
-
-
1
class Content < OpenAI::Internal::Type::BaseModel
# One JSONL row of the inline data source.
-
# @!attribute item
-
# Arbitrary JSON object exposed to templates as the `item` namespace
# (presumably matching the eval's data schema — confirm against API docs).
#
-
# @return [Hash{Symbol=>Object}]
-
1
required :item, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]
-
-
# @!attribute sample
-
# Optional arbitrary JSON object holding a sampled model output for the row.
#
-
# @return [Hash{Symbol=>Object}, nil]
-
1
optional :sample, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]
-
-
# @!method initialize(item:, sample: nil)
-
# @param item [Hash{Symbol=>Object}]
-
# @param sample [Hash{Symbol=>Object}]
-
end
-
end
-
-
1
class FileID < OpenAI::Internal::Type::BaseModel
# JSONL data source referenced by a previously uploaded file's identifier
# (discriminated by `type: :file_id`).
-
# @!attribute id
-
# The identifier of the file.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute type
-
# The type of jsonl source. Always `file_id`.
-
#
-
# @return [Symbol, :file_id]
-
1
required :type, const: :file_id
-
-
# @!method initialize(id:, type: :file_id)
-
# @param id [String] The identifier of the file.
-
#
-
# @param type [Symbol, :file_id] The type of jsonl source. Always `file_id`.
-
end
-
-
1
class Responses < OpenAI::Internal::Type::BaseModel
-
# @!attribute type
-
# The type of run data source. Always `responses`.
-
#
-
# @return [Symbol, :responses]
-
1
required :type, const: :responses
-
-
# @!attribute created_after
-
# Only include items created after this timestamp (inclusive). This is a query
-
# parameter used to select responses.
-
#
-
# @return [Integer, nil]
-
1
optional :created_after, Integer, nil?: true
-
-
# @!attribute created_before
-
# Only include items created before this timestamp (inclusive). This is a query
-
# parameter used to select responses.
-
#
-
# @return [Integer, nil]
-
1
optional :created_before, Integer, nil?: true
-
-
# @!attribute instructions_search
-
# Optional string to search the 'instructions' field. This is a query parameter
-
# used to select responses.
-
#
-
# @return [String, nil]
-
1
optional :instructions_search, String, nil?: true
-
-
# @!attribute metadata
-
# Metadata filter for the responses. This is a query parameter used to select
-
# responses.
-
#
-
# @return [Object, nil]
-
1
optional :metadata, OpenAI::Internal::Type::Unknown, nil?: true
-
-
# @!attribute model
-
# The name of the model to find responses for. This is a query parameter used to
-
# select responses.
-
#
-
# @return [String, nil]
-
1
optional :model, String, nil?: true
-
-
# @!attribute reasoning_effort
-
# Optional reasoning effort parameter. This is a query parameter used to select
-
# responses.
-
#
-
# @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
-
1
optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
-
-
# @!attribute temperature
-
# Sampling temperature. This is a query parameter used to select responses.
-
#
-
# @return [Float, nil]
-
1
optional :temperature, Float, nil?: true
-
-
# @!attribute tools
-
# List of tool names. This is a query parameter used to select responses.
-
#
-
# @return [Array<String>, nil]
-
1
optional :tools, OpenAI::Internal::Type::ArrayOf[String], nil?: true
-
-
# @!attribute top_p
-
# Nucleus sampling parameter. This is a query parameter used to select responses.
-
#
-
# @return [Float, nil]
-
1
optional :top_p, Float, nil?: true
-
-
# @!attribute users
-
# List of user identifiers. This is a query parameter used to select responses.
-
#
-
# @return [Array<String>, nil]
-
1
optional :users, OpenAI::Internal::Type::ArrayOf[String], nil?: true
-
-
# @!method initialize(created_after: nil, created_before: nil, instructions_search: nil, metadata: nil, model: nil, reasoning_effort: nil, temperature: nil, tools: nil, top_p: nil, users: nil, type: :responses)
-
# Some parameter documentation has been truncated, see
-
# {OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::Responses}
-
# for more details.
-
#
-
# An EvalResponsesSource object describing a run data source configuration.
-
#
-
# @param created_after [Integer, nil] Only include items created after this timestamp (inclusive). This is a query par
-
#
-
# @param created_before [Integer, nil] Only include items created before this timestamp (inclusive). This is a query pa
-
#
-
# @param instructions_search [String, nil] Optional string to search the 'instructions' field. This is a query parameter us
-
#
-
# @param metadata [Object, nil] Metadata filter for the responses. This is a query parameter used to select resp
-
#
-
# @param model [String, nil] The name of the model to find responses for. This is a query parameter used to s
-
#
-
# @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Optional reasoning effort parameter. This is a query parameter used to select re
-
#
-
# @param temperature [Float, nil] Sampling temperature. This is a query parameter used to select responses.
-
#
-
# @param tools [Array<String>, nil] List of tool names. This is a query parameter used to select responses.
-
#
-
# @param top_p [Float, nil] Nucleus sampling parameter. This is a query parameter used to select responses.
-
#
-
# @param users [Array<String>, nil] List of user identifiers. This is a query parameter used to select responses.
-
#
-
# @param type [Symbol, :responses] The type of run data source. Always `responses`.
-
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::FileContent, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::FileID, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::Source::Responses)]
-
end
-
-
# The type of run data source. Always `responses`.
-
#
-
# @see OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource#type
-
1
module Type
# Enum with the single permitted value `:responses` for this data source's
# `type` discriminator.
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
RESPONSES = :responses
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
# Used when sampling from a model. Dictates the structure of the messages passed
-
# into the model. Can either be a reference to a prebuilt trajectory (ie,
-
# `item.input_trajectory`), or a template with variable references to the `item`
-
# namespace.
-
#
-
# @see OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource#input_messages
-
1
module InputMessages
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
1
variant :template,
-
-> {
-
OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template
-
}
-
-
1
variant :item_reference,
-
-> {
-
OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::ItemReference
-
}
-
-
1
class Template < OpenAI::Internal::Type::BaseModel
-
# @!attribute template
-
# A list of chat messages forming the prompt or context. May include variable
-
# references to the `item` namespace, ie {{item.name}}.
-
#
-
# @return [Array<OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::ChatMessage, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem>]
-
1
required :template,
-
-> do
-
OpenAI::Internal::Type::ArrayOf[
-
union: OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template
-
]
-
end
-
-
# @!attribute type
-
# The type of input messages. Always `template`.
-
#
-
# @return [Symbol, :template]
-
1
required :type, const: :template
-
-
# @!method initialize(template:, type: :template)
-
# Some parameter documentation has been truncated, see
-
# {OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template}
-
# for more details.
-
#
-
# @param template [Array<OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::ChatMessage, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem>] A list of chat messages forming the prompt or context. May include variable refe
-
#
-
# @param type [Symbol, :template] The type of input messages. Always `template`.
-
-
# A message input to the model with a role indicating instruction following
-
# hierarchy. Instructions given with the `developer` or `system` role take
-
# precedence over instructions given with the `user` role. Messages with the
-
# `assistant` role are presumed to have been generated by the model in previous
-
# interactions.
-
1
module Template
-
1
extend OpenAI::Internal::Type::Union
-
-
1
variant -> {
-
OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::ChatMessage
-
}
-
-
# A message input to the model with a role indicating instruction following
-
# hierarchy. Instructions given with the `developer` or `system` role take
-
# precedence over instructions given with the `user` role. Messages with the
-
# `assistant` role are presumed to have been generated by the model in previous
-
# interactions.
-
1
variant -> {
-
OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem
-
}
-
-
1
class ChatMessage < OpenAI::Internal::Type::BaseModel
# Simple chat message template entry: free-form text `content` plus a
# `role` that is a plain String here (unlike EvalItem, whose role is
# restricted to an enum).
-
# @!attribute content
-
# The content of the message.
-
#
-
# @return [String]
-
1
required :content, String
-
-
# @!attribute role
-
# The role of the message (e.g. "system", "assistant", "user").
-
#
-
# @return [String]
-
1
required :role, String
-
-
# @!method initialize(content:, role:)
-
# @param content [String] The content of the message.
-
#
-
# @param role [String] The role of the message (e.g. "system", "assistant", "user").
-
end
-
-
1
class EvalItem < OpenAI::Internal::Type::BaseModel
-
# @!attribute content
-
# Inputs to the model - can contain template strings.
-
#
-
# @return [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Content::OutputText, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Content::InputImage, Array<Object>]
-
1
required :content,
-
union: -> {
-
OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Content
-
}
-
-
# @!attribute role
-
# The role of the message input. One of `user`, `assistant`, `system`, or
-
# `developer`.
-
#
-
# @return [Symbol, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Role]
-
1
required :role,
-
enum: -> {
-
OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Role
-
}
-
-
# @!attribute type
-
# The type of the message input. Always `message`.
-
#
-
# @return [Symbol, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Type, nil]
-
1
optional :type,
-
enum: -> {
-
OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Type
-
}
-
-
# @!method initialize(content:, role:, type: nil)
-
# Some parameter documentation has been truncated, see
-
# {OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem}
-
# for more details.
-
#
-
# A message input to the model with a role indicating instruction following
-
# hierarchy. Instructions given with the `developer` or `system` role take
-
# precedence over instructions given with the `user` role. Messages with the
-
# `assistant` role are presumed to have been generated by the model in previous
-
# interactions.
-
#
-
# @param content [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Content::OutputText, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Content::InputImage, Array<Object>] Inputs to the model - can contain template strings.
-
#
-
# @param role [Symbol, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Role] The role of the message input. One of `user`, `assistant`, `system`, or
-
#
-
# @param type [Symbol, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Type] The type of the message input. Always `message`.
-
-
# Inputs to the model - can contain template strings.
-
#
-
# @see OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem#content
-
1
module Content
-
1
extend OpenAI::Internal::Type::Union
-
-
# A text input to the model.
-
1
variant String
-
-
# A text input to the model.
-
1
variant -> { OpenAI::Responses::ResponseInputText }
-
-
# A text output from the model.
-
1
variant -> {
-
OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Content::OutputText
-
}
-
-
# An image input to the model.
-
1
variant -> {
-
OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Content::InputImage
-
}
-
-
# A list of inputs, each of which may be either an input text or input image object.
-
1
variant -> { OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Content::AnArrayOfInputTextAndInputImageArray }
-
-
1
class OutputText < OpenAI::Internal::Type::BaseModel
-
# @!attribute text
-
# The text output from the model.
-
#
-
# @return [String]
-
1
required :text, String
-
-
# @!attribute type
-
# The type of the output text. Always `output_text`.
-
#
-
# @return [Symbol, :output_text]
-
1
required :type, const: :output_text
-
-
# @!method initialize(text:, type: :output_text)
-
# Some parameter documentation has been truncated, see
-
# {OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Content::OutputText}
-
# for more details.
-
#
-
# A text output from the model.
-
#
-
# @param text [String] The text output from the model.
-
#
-
# @param type [Symbol, :output_text] The type of the output text. Always `output_text`.
-
end
-
-
1
class InputImage < OpenAI::Internal::Type::BaseModel
-
# @!attribute image_url
-
# The URL of the image input.
-
#
-
# @return [String]
-
1
required :image_url, String
-
-
# @!attribute type
-
# The type of the image input. Always `input_image`.
-
#
-
# @return [Symbol, :input_image]
-
1
required :type, const: :input_image
-
-
# @!attribute detail
-
# The detail level of the image to be sent to the model. One of `high`, `low`, or
-
# `auto`. Defaults to `auto`.
-
#
-
# @return [String, nil]
-
1
optional :detail, String
-
-
# @!method initialize(image_url:, detail: nil, type: :input_image)
-
# Some parameter documentation has been truncated, see
-
# {OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Content::InputImage}
-
# for more details.
-
#
-
# An image input to the model.
-
#
-
# @param image_url [String] The URL of the image input.
-
#
-
# @param detail [String] The detail level of the image to be sent to the model. One of `high`, `low`, or
-
#
-
# @param type [Symbol, :input_image] The type of the image input. Always `input_image`.
-
end
-
-
# @!method self.variants
-
# @return [Array(String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Content::OutputText, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem::Content::InputImage, Array<Object>)]
-
-
# @type [OpenAI::Internal::Type::Converter]
-
1
AnArrayOfInputTextAndInputImageArray = OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::Unknown]
-
end
-
-
# The role of the message input. One of `user`, `assistant`, `system`, or
-
# `developer`.
-
#
-
# @see OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem#role
-
1
module Role
# Permitted message-input roles. `developer`/`system` instructions take
# precedence over `user`; `assistant` marks prior model output.
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
USER = :user
-
1
ASSISTANT = :assistant
-
1
SYSTEM = :system
-
1
DEVELOPER = :developer
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
# The type of the message input. Always `message`.
-
#
-
# @see OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem#type
-
1
module Type
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
MESSAGE = :message
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::ChatMessage, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template::Template::EvalItem)]
-
end
-
end
-
-
1
class ItemReference < OpenAI::Internal::Type::BaseModel
# Input-messages variant that points at a prebuilt trajectory stored in the
# `item` namespace instead of supplying an inline template
# (discriminated by `type: :item_reference`).
-
# @!attribute item_reference
-
# A reference to a variable in the `item` namespace, i.e., "item.name"
-
#
-
# @return [String]
-
1
required :item_reference, String
-
-
# @!attribute type
-
# The type of input messages. Always `item_reference`.
-
#
-
# @return [Symbol, :item_reference]
-
1
required :type, const: :item_reference
-
-
# @!method initialize(item_reference:, type: :item_reference)
-
# @param item_reference [String] A reference to a variable in the `item` namespace, i.e., "item.name"
-
#
-
# @param type [Symbol, :item_reference] The type of input messages. Always `item_reference`.
-
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::Template, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::InputMessages::ItemReference)]
-
end
-
-
# @see OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource#sampling_params
-
1
class SamplingParams < OpenAI::Internal::Type::BaseModel
-
# @!attribute max_completion_tokens
-
# The maximum number of tokens in the generated output.
-
#
-
# @return [Integer, nil]
-
1
optional :max_completion_tokens, Integer
-
-
# @!attribute seed
-
# A seed value to initialize the randomness, during sampling.
-
#
-
# @return [Integer, nil]
-
1
optional :seed, Integer
-
-
# @!attribute temperature
-
# A higher temperature increases randomness in the outputs.
-
#
-
# @return [Float, nil]
-
1
optional :temperature, Float
-
-
# @!attribute text
-
# Configuration options for a text response from the model. Can be plain text or
-
# structured JSON data. Learn more:
-
#
-
# - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
-
# - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
-
#
-
# @return [OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams::Text, nil]
-
1
optional :text,
-
-> {
-
OpenAI::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams::Text
-
}
-
-
# @!attribute tools
-
# An array of tools the model may call while generating a response. You can
-
# specify which tool to use by setting the `tool_choice` parameter.
-
#
-
# The two categories of tools you can provide the model are:
-
#
-
# - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
-
# capabilities, like
-
# [web search](https://platform.openai.com/docs/guides/tools-web-search) or
-
# [file search](https://platform.openai.com/docs/guides/tools-file-search).
-
# Learn more about
-
# [built-in tools](https://platform.openai.com/docs/guides/tools).
-
# - **Function calls (custom tools)**: Functions that are defined by you, enabling
-
# the model to call your own code. Learn more about
-
# [function calling](https://platform.openai.com/docs/guides/function-calling).
-
#
-
# @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>, nil]
-
1
optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] }
-
-
# @!attribute top_p
-
# An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
-
#
-
# @return [Float, nil]
-
1
optional :top_p, Float
-
-
# @!method initialize(max_completion_tokens: nil, seed: nil, temperature: nil, text: nil, tools: nil, top_p: nil)
-
# Some parameter documentation has been truncated, see
-
# {OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams}
-
# for more details.
-
#
-
# @param max_completion_tokens [Integer] The maximum number of tokens in the generated output.
-
#
-
# @param seed [Integer] A seed value to initialize the randomness, during sampling.
-
#
-
# @param temperature [Float] A higher temperature increases randomness in the outputs.
-
#
-
# @param text [OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams::Text] Configuration options for a text response from the model. Can be plain
-
#
-
# @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
-
#
-
# @param top_p [Float] An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
-
-
# @see OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams#text
-
1
class Text < OpenAI::Internal::Type::BaseModel
-
# @!attribute format_
-
# An object specifying the format that the model must output.
-
#
-
# Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
-
# ensures the model will match your supplied JSON schema. Learn more in the
-
# [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
-
#
-
# The default format is `{ "type": "text" }` with no additional options.
-
#
-
# **Not recommended for gpt-4o and newer models:**
-
#
-
# Setting to `{ "type": "json_object" }` enables the older JSON mode, which
-
# ensures the message the model generates is valid JSON. Using `json_schema` is
-
# preferred for models that support it.
-
#
-
# @return [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject, nil]
-
1
optional :format_,
-
union: -> {
-
OpenAI::Responses::ResponseFormatTextConfig
-
},
-
api_name: :format
-
-
# @!method initialize(format_: nil)
-
# Some parameter documentation has been truncated, see
-
# {OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource::SamplingParams::Text}
-
# for more details.
-
#
-
# Configuration options for a text response from the model. Can be plain text or
-
# structured JSON data. Learn more:
-
#
-
# - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
-
# - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
-
#
-
# @param format_ [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output.
-
end
-
end
-
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Evals::CreateEvalJSONLRunDataSource, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource, OpenAI::Models::Evals::RunCreateParams::DataSource::CreateEvalResponsesRunDataSource)]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Evals
-
# @see OpenAI::Resources::Evals::Runs#create
-
1
class RunCreateResponse < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# Unique identifier for the evaluation run.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute created_at
-
# Unix timestamp (in seconds) when the evaluation run was created.
-
#
-
# @return [Integer]
-
1
required :created_at, Integer
-
-
# @!attribute data_source
-
# Information about the run's data source.
-
#
-
# @return [OpenAI::Models::Evals::CreateEvalJSONLRunDataSource, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses]
-
1
required :data_source, union: -> { OpenAI::Models::Evals::RunCreateResponse::DataSource }
-
-
# @!attribute error
-
# An object representing an error response from the Eval API.
-
#
-
# @return [OpenAI::Models::Evals::EvalAPIError]
-
1
required :error, -> { OpenAI::Evals::EvalAPIError }
-
-
# @!attribute eval_id
-
# The identifier of the associated evaluation.
-
#
-
# @return [String]
-
1
required :eval_id, String
-
-
# @!attribute metadata
-
# Set of 16 key-value pairs that can be attached to an object. This can be useful
-
# for storing additional information about the object in a structured format, and
-
# querying for objects via API or the dashboard.
-
#
-
# Keys are strings with a maximum length of 64 characters. Values are strings with
-
# a maximum length of 512 characters.
-
#
-
# @return [Hash{Symbol=>String}, nil]
-
1
required :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
-
-
# @!attribute model
-
# The model that is evaluated, if applicable.
-
#
-
# @return [String]
-
1
required :model, String
-
-
# @!attribute name
-
# The name of the evaluation run.
-
#
-
# @return [String]
-
1
required :name, String
-
-
# @!attribute object
-
# The type of the object. Always "eval.run".
-
#
-
# @return [Symbol, :"eval.run"]
-
1
required :object, const: :"eval.run"
-
-
# @!attribute per_model_usage
-
# Usage statistics for each model during the evaluation run.
-
#
-
# @return [Array<OpenAI::Models::Evals::RunCreateResponse::PerModelUsage>]
-
1
required :per_model_usage,
-
-> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::RunCreateResponse::PerModelUsage] }
-
-
# @!attribute per_testing_criteria_results
-
# Results per testing criteria applied during the evaluation run.
-
#
-
# @return [Array<OpenAI::Models::Evals::RunCreateResponse::PerTestingCriteriaResult>]
-
1
required :per_testing_criteria_results,
-
-> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::RunCreateResponse::PerTestingCriteriaResult] }
-
-
# @!attribute report_url
-
# The URL to the rendered evaluation run report on the UI dashboard.
-
#
-
# @return [String]
-
1
required :report_url, String
-
-
# @!attribute result_counts
-
# Counters summarizing the outcomes of the evaluation run.
-
#
-
# @return [OpenAI::Models::Evals::RunCreateResponse::ResultCounts]
-
1
required :result_counts, -> { OpenAI::Models::Evals::RunCreateResponse::ResultCounts }
-
-
# @!attribute status
-
# The status of the evaluation run.
-
#
-
# @return [String]
-
1
required :status, String
-
-
# @!method initialize(id:, created_at:, data_source:, error:, eval_id:, metadata:, model:, name:, per_model_usage:, per_testing_criteria_results:, report_url:, result_counts:, status:, object: :"eval.run")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Evals::RunCreateResponse} for more details.
-
#
-
# A schema representing an evaluation run.
-
#
-
# @param id [String] Unique identifier for the evaluation run.
-
#
-
# @param created_at [Integer] Unix timestamp (in seconds) when the evaluation run was created.
-
#
-
# @param data_source [OpenAI::Models::Evals::CreateEvalJSONLRunDataSource, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses] Information about the run's data source.
-
#
-
# @param error [OpenAI::Models::Evals::EvalAPIError] An object representing an error response from the Eval API.
-
#
-
# @param eval_id [String] The identifier of the associated evaluation.
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
#
-
# @param model [String] The model that is evaluated, if applicable.
-
#
-
# @param name [String] The name of the evaluation run.
-
#
-
# @param per_model_usage [Array<OpenAI::Models::Evals::RunCreateResponse::PerModelUsage>] Usage statistics for each model during the evaluation run.
-
#
-
# @param per_testing_criteria_results [Array<OpenAI::Models::Evals::RunCreateResponse::PerTestingCriteriaResult>] Results per testing criteria applied during the evaluation run.
-
#
-
# @param report_url [String] The URL to the rendered evaluation run report on the UI dashboard.
-
#
-
# @param result_counts [OpenAI::Models::Evals::RunCreateResponse::ResultCounts] Counters summarizing the outcomes of the evaluation run.
-
#
-
# @param status [String] The status of the evaluation run.
-
#
-
# @param object [Symbol, :"eval.run"] The type of the object. Always "eval.run".
-
-
# Information about the run's data source.
-
#
-
# @see OpenAI::Models::Evals::RunCreateResponse#data_source
-
1
module DataSource
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
# A JsonlRunDataSource object with that specifies a JSONL file that matches the eval
-
1
variant :jsonl, -> { OpenAI::Evals::CreateEvalJSONLRunDataSource }
-
-
# A CompletionsRunDataSource object describing a model sampling configuration.
-
1
variant :completions, -> { OpenAI::Evals::CreateEvalCompletionsRunDataSource }
-
-
# A ResponsesRunDataSource object describing a model sampling configuration.
-
1
variant :responses, -> { OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses }
-
-
1
class Responses < OpenAI::Internal::Type::BaseModel
-
# @!attribute source
-
# Determines what populates the `item` namespace in this run's data source.
-
#
-
# @return [OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::FileContent, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::FileID, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::Responses]
-
1
required :source, union: -> { OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source }
-
-
# @!attribute type
-
# The type of run data source. Always `responses`.
-
#
-
# @return [Symbol, :responses]
-
1
required :type, const: :responses
-
-
# @!attribute input_messages
-
# Used when sampling from a model. Dictates the structure of the messages passed
-
# into the model. Can either be a reference to a prebuilt trajectory (ie,
-
# `item.input_trajectory`), or a template with variable references to the `item`
-
# namespace.
-
#
-
# @return [OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::ItemReference, nil]
-
1
optional :input_messages,
-
union: -> { OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages }
-
-
# @!attribute model
-
# The name of the model to use for generating completions (e.g. "o3-mini").
-
#
-
# @return [String, nil]
-
1
optional :model, String
-
-
# @!attribute sampling_params
-
#
-
# @return [OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams, nil]
-
1
optional :sampling_params,
-
-> { OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams }
-
-
# @!method initialize(source:, input_messages: nil, model: nil, sampling_params: nil, type: :responses)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses} for more
-
# details.
-
#
-
# A ResponsesRunDataSource object describing a model sampling configuration.
-
#
-
# @param source [OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::FileContent, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::FileID, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::Responses] Determines what populates the `item` namespace in this run's data source.
-
#
-
# @param input_messages [OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::ItemReference] Used when sampling from a model. Dictates the structure of the messages passed i
-
#
-
# @param model [String] The name of the model to use for generating completions (e.g. "o3-mini").
-
#
-
# @param sampling_params [OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams]
-
#
-
# @param type [Symbol, :responses] The type of run data source. Always `responses`.
-
-
# Determines what populates the `item` namespace in this run's data source.
-
#
-
# @see OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses#source
-
1
module Source
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
1
variant :file_content,
-
-> { OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::FileContent }
-
-
1
variant :file_id, -> { OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::FileID }
-
-
# A EvalResponsesSource object describing a run data source configuration.
-
1
variant :responses,
-
-> { OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::Responses }
-
-
1
class FileContent < OpenAI::Internal::Type::BaseModel
-
# @!attribute content
-
# The content of the jsonl file.
-
#
-
# @return [Array<OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::FileContent::Content>]
-
1
required :content,
-
-> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::FileContent::Content] }
-
-
# @!attribute type
-
# The type of jsonl source. Always `file_content`.
-
#
-
# @return [Symbol, :file_content]
-
1
required :type, const: :file_content
-
-
# @!method initialize(content:, type: :file_content)
-
# @param content [Array<OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::FileContent::Content>] The content of the jsonl file.
-
#
-
# @param type [Symbol, :file_content] The type of jsonl source. Always `file_content`.
-
-
1
class Content < OpenAI::Internal::Type::BaseModel
-
# @!attribute item
-
#
-
# @return [Hash{Symbol=>Object}]
-
1
required :item, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]
-
-
# @!attribute sample
-
#
-
# @return [Hash{Symbol=>Object}, nil]
-
1
optional :sample, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]
-
-
# @!method initialize(item:, sample: nil)
-
# @param item [Hash{Symbol=>Object}]
-
# @param sample [Hash{Symbol=>Object}]
-
end
-
end
-
-
1
class FileID < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The identifier of the file.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute type
-
# The type of jsonl source. Always `file_id`.
-
#
-
# @return [Symbol, :file_id]
-
1
required :type, const: :file_id
-
-
# @!method initialize(id:, type: :file_id)
-
# @param id [String] The identifier of the file.
-
#
-
# @param type [Symbol, :file_id] The type of jsonl source. Always `file_id`.
-
end
-
-
1
class Responses < OpenAI::Internal::Type::BaseModel
-
# @!attribute type
-
# The type of run data source. Always `responses`.
-
#
-
# @return [Symbol, :responses]
-
1
required :type, const: :responses
-
-
# @!attribute created_after
-
# Only include items created after this timestamp (inclusive). This is a query
-
# parameter used to select responses.
-
#
-
# @return [Integer, nil]
-
1
optional :created_after, Integer, nil?: true
-
-
# @!attribute created_before
-
# Only include items created before this timestamp (inclusive). This is a query
-
# parameter used to select responses.
-
#
-
# @return [Integer, nil]
-
1
optional :created_before, Integer, nil?: true
-
-
# @!attribute instructions_search
-
# Optional string to search the 'instructions' field. This is a query parameter
-
# used to select responses.
-
#
-
# @return [String, nil]
-
1
optional :instructions_search, String, nil?: true
-
-
# @!attribute metadata
-
# Metadata filter for the responses. This is a query parameter used to select
-
# responses.
-
#
-
# @return [Object, nil]
-
1
optional :metadata, OpenAI::Internal::Type::Unknown, nil?: true
-
-
# @!attribute model
-
# The name of the model to find responses for. This is a query parameter used to
-
# select responses.
-
#
-
# @return [String, nil]
-
1
optional :model, String, nil?: true
-
-
# @!attribute reasoning_effort
-
# Optional reasoning effort parameter. This is a query parameter used to select
-
# responses.
-
#
-
# @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
-
1
optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
-
-
# @!attribute temperature
-
# Sampling temperature. This is a query parameter used to select responses.
-
#
-
# @return [Float, nil]
-
1
optional :temperature, Float, nil?: true
-
-
# @!attribute tools
-
# List of tool names. This is a query parameter used to select responses.
-
#
-
# @return [Array<String>, nil]
-
1
optional :tools, OpenAI::Internal::Type::ArrayOf[String], nil?: true
-
-
# @!attribute top_p
-
# Nucleus sampling parameter. This is a query parameter used to select responses.
-
#
-
# @return [Float, nil]
-
1
optional :top_p, Float, nil?: true
-
-
# @!attribute users
-
# List of user identifiers. This is a query parameter used to select responses.
-
#
-
# @return [Array<String>, nil]
-
1
optional :users, OpenAI::Internal::Type::ArrayOf[String], nil?: true
-
-
# @!method initialize(created_after: nil, created_before: nil, instructions_search: nil, metadata: nil, model: nil, reasoning_effort: nil, temperature: nil, tools: nil, top_p: nil, users: nil, type: :responses)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::Responses}
-
# for more details.
-
#
-
# A EvalResponsesSource object describing a run data source configuration.
-
#
-
# @param created_after [Integer, nil] Only include items created after this timestamp (inclusive). This is a query par
-
#
-
# @param created_before [Integer, nil] Only include items created before this timestamp (inclusive). This is a query pa
-
#
-
# @param instructions_search [String, nil] Optional string to search the 'instructions' field. This is a query parameter us
-
#
-
# @param metadata [Object, nil] Metadata filter for the responses. This is a query parameter used to select resp
-
#
-
# @param model [String, nil] The name of the model to find responses for. This is a query parameter used to s
-
#
-
# @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Optional reasoning effort parameter. This is a query parameter used to select re
-
#
-
# @param temperature [Float, nil] Sampling temperature. This is a query parameter used to select responses.
-
#
-
# @param tools [Array<String>, nil] List of tool names. This is a query parameter used to select responses.
-
#
-
# @param top_p [Float, nil] Nucleus sampling parameter. This is a query parameter used to select responses.
-
#
-
# @param users [Array<String>, nil] List of user identifiers. This is a query parameter used to select responses.
-
#
-
# @param type [Symbol, :responses] The type of run data source. Always `responses`.
-
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::FileContent, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::FileID, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::Source::Responses)]
-
end
-
-
# Used when sampling from a model. Dictates the structure of the messages passed
-
# into the model. Can either be a reference to a prebuilt trajectory (ie,
-
# `item.input_trajectory`), or a template with variable references to the `item`
-
# namespace.
-
#
-
# @see OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses#input_messages
-
1
module InputMessages
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
1
variant :template,
-
-> { OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template }
-
-
1
variant :item_reference,
-
-> { OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::ItemReference }
-
-
1
class Template < OpenAI::Internal::Type::BaseModel
-
# @!attribute template
-
# A list of chat messages forming the prompt or context. May include variable
-
# references to the `item` namespace, ie {{item.name}}.
-
#
-
# @return [Array<OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::ChatMessage, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem>]
-
1
required :template,
-
-> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template] }
-
-
# @!attribute type
-
# The type of input messages. Always `template`.
-
#
-
# @return [Symbol, :template]
-
1
required :type, const: :template
-
-
# @!method initialize(template:, type: :template)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template}
-
# for more details.
-
#
-
# @param template [Array<OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::ChatMessage, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem>] A list of chat messages forming the prompt or context. May include variable refe
-
#
-
# @param type [Symbol, :template] The type of input messages. Always `template`.
-
-
# A message input to the model with a role indicating instruction following
-
# hierarchy. Instructions given with the `developer` or `system` role take
-
# precedence over instructions given with the `user` role. Messages with the
-
# `assistant` role are presumed to have been generated by the model in previous
-
# interactions.
-
1
module Template
-
1
extend OpenAI::Internal::Type::Union
-
-
1
variant -> { OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::ChatMessage }
-
-
# A message input to the model with a role indicating instruction following
-
# hierarchy. Instructions given with the `developer` or `system` role take
-
# precedence over instructions given with the `user` role. Messages with the
-
# `assistant` role are presumed to have been generated by the model in previous
-
# interactions.
-
1
variant -> { OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem }
-
-
1
class ChatMessage < OpenAI::Internal::Type::BaseModel
-
# @!attribute content
-
# The content of the message.
-
#
-
# @return [String]
-
1
required :content, String
-
-
# @!attribute role
-
# The role of the message (e.g. "system", "assistant", "user").
-
#
-
# @return [String]
-
1
required :role, String
-
-
# @!method initialize(content:, role:)
-
# @param content [String] The content of the message.
-
#
-
# @param role [String] The role of the message (e.g. "system", "assistant", "user").
-
end
-
-
1
class EvalItem < OpenAI::Internal::Type::BaseModel
-
# @!attribute content
-
# Inputs to the model - can contain template strings.
-
#
-
# @return [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage, Array<Object>]
-
1
required :content,
-
union: -> { OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content }
-
-
# @!attribute role
-
# The role of the message input. One of `user`, `assistant`, `system`, or
-
# `developer`.
-
#
-
# @return [Symbol, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role]
-
1
required :role,
-
enum: -> { OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role }
-
-
# @!attribute type
-
# The type of the message input. Always `message`.
-
#
-
# @return [Symbol, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type, nil]
-
1
optional :type,
-
enum: -> { OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type }
-
-
# @!method initialize(content:, role:, type: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem}
-
# for more details.
-
#
-
# A message input to the model with a role indicating instruction following
-
# hierarchy. Instructions given with the `developer` or `system` role take
-
# precedence over instructions given with the `user` role. Messages with the
-
# `assistant` role are presumed to have been generated by the model in previous
-
# interactions.
-
#
-
# @param content [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage, Array<Object>] Inputs to the model - can contain template strings.
-
#
-
# @param role [Symbol, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role] The role of the message input. One of `user`, `assistant`, `system`, or
-
#
-
# @param type [Symbol, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type] The type of the message input. Always `message`.
-
-
# Inputs to the model - can contain template strings.
-
#
-
# @see OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem#content
-
1
module Content
-
1
extend OpenAI::Internal::Type::Union
-
-
# A text input to the model.
-
1
variant String
-
-
# A text input to the model.
-
1
variant -> { OpenAI::Responses::ResponseInputText }
-
-
# A text output from the model.
-
1
variant -> { OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText }
-
-
# An image input to the model.
-
1
variant -> { OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage }
-
-
# A list of inputs, each of which may be either an input text or input image object.
-
1
variant -> { OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::AnArrayOfInputTextAndInputImageArray }
-
-
1
class OutputText < OpenAI::Internal::Type::BaseModel
-
# @!attribute text
-
# The text output from the model.
-
#
-
# @return [String]
-
1
required :text, String
-
-
# @!attribute type
-
# The type of the output text. Always `output_text`.
-
#
-
# @return [Symbol, :output_text]
-
1
required :type, const: :output_text
-
-
# @!method initialize(text:, type: :output_text)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText}
-
# for more details.
-
#
-
# A text output from the model.
-
#
-
# @param text [String] The text output from the model.
-
#
-
# @param type [Symbol, :output_text] The type of the output text. Always `output_text`.
-
end
-
-
1
class InputImage < OpenAI::Internal::Type::BaseModel
-
# @!attribute image_url
-
# The URL of the image input.
-
#
-
# @return [String]
-
1
required :image_url, String
-
-
# @!attribute type
-
# The type of the image input. Always `input_image`.
-
#
-
# @return [Symbol, :input_image]
-
1
required :type, const: :input_image
-
-
# @!attribute detail
-
# The detail level of the image to be sent to the model. One of `high`, `low`, or
-
# `auto`. Defaults to `auto`.
-
#
-
# @return [String, nil]
-
1
optional :detail, String
-
-
# @!method initialize(image_url:, detail: nil, type: :input_image)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage}
-
# for more details.
-
#
-
# An image input to the model.
-
#
-
# @param image_url [String] The URL of the image input.
-
#
-
# @param detail [String] The detail level of the image to be sent to the model. One of `high`, `low`, or
-
#
-
# @param type [Symbol, :input_image] The type of the image input. Always `input_image`.
-
end
-
-
# @!method self.variants
-
# @return [Array(String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage, Array<Object>)]
-
-
# @type [OpenAI::Internal::Type::Converter]
-
1
AnArrayOfInputTextAndInputImageArray = OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::Unknown]
-
end
-
-
# The role of the message input. One of `user`, `assistant`, `system`, or
-
# `developer`.
-
#
-
# @see OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem#role
-
1
module Role
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
USER = :user
-
1
ASSISTANT = :assistant
-
1
SYSTEM = :system
-
1
DEVELOPER = :developer
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
# The type of the message input. Always `message`.
-
#
-
# @see OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem#type
-
1
module Type
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
MESSAGE = :message
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::ChatMessage, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem)]
-
end
-
end
-
-
1
class ItemReference < OpenAI::Internal::Type::BaseModel
-
# @!attribute item_reference
-
# A reference to a variable in the `item` namespace. Ie, "item.name"
-
#
-
# @return [String]
-
1
required :item_reference, String
-
-
# @!attribute type
-
# The type of input messages. Always `item_reference`.
-
#
-
# @return [Symbol, :item_reference]
-
1
required :type, const: :item_reference
-
-
# @!method initialize(item_reference:, type: :item_reference)
-
# @param item_reference [String] A reference to a variable in the `item` namespace. Ie, "item.name"
-
#
-
# @param type [Symbol, :item_reference] The type of input messages. Always `item_reference`.
-
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::Template, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::InputMessages::ItemReference)]
-
end
-
-
# @see OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses#sampling_params
-
1
class SamplingParams < OpenAI::Internal::Type::BaseModel
-
# @!attribute max_completion_tokens
-
# The maximum number of tokens in the generated output.
-
#
-
# @return [Integer, nil]
-
1
optional :max_completion_tokens, Integer
-
-
# @!attribute seed
-
# A seed value to initialize the randomness, during sampling.
-
#
-
# @return [Integer, nil]
-
1
optional :seed, Integer
-
-
# @!attribute temperature
-
# A higher temperature increases randomness in the outputs.
-
#
-
# @return [Float, nil]
-
1
optional :temperature, Float
-
-
# @!attribute text
-
# Configuration options for a text response from the model. Can be plain text or
-
# structured JSON data. Learn more:
-
#
-
# - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
-
# - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
-
#
-
# @return [OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams::Text, nil]
-
1
optional :text,
-
-> { OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams::Text }
-
-
# @!attribute tools
-
# An array of tools the model may call while generating a response. You can
-
# specify which tool to use by setting the `tool_choice` parameter.
-
#
-
# The two categories of tools you can provide the model are:
-
#
-
# - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
-
# capabilities, like
-
# [web search](https://platform.openai.com/docs/guides/tools-web-search) or
-
# [file search](https://platform.openai.com/docs/guides/tools-file-search).
-
# Learn more about
-
# [built-in tools](https://platform.openai.com/docs/guides/tools).
-
# - **Function calls (custom tools)**: Functions that are defined by you, enabling
-
# the model to call your own code. Learn more about
-
# [function calling](https://platform.openai.com/docs/guides/function-calling).
-
#
-
# @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>, nil]
-
1
optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] }
-
-
# @!attribute top_p
-
# An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
-
#
-
# @return [Float, nil]
-
1
optional :top_p, Float
-
-
# @!method initialize(max_completion_tokens: nil, seed: nil, temperature: nil, text: nil, tools: nil, top_p: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams}
-
# for more details.
-
#
-
# @param max_completion_tokens [Integer] The maximum number of tokens in the generated output.
-
#
-
# @param seed [Integer] A seed value to initialize the randomness, during sampling.
-
#
-
# @param temperature [Float] A higher temperature increases randomness in the outputs.
-
#
-
# @param text [OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams::Text] Configuration options for a text response from the model. Can be plain
-
#
-
# @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
-
#
-
# @param top_p [Float] An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
-
-
# @see OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams#text
class Text < OpenAI::Internal::Type::BaseModel
  # @!attribute format_
  # An object specifying the format that the model must output.
  #
  # Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
  # ensures the model will match your supplied JSON schema. Learn more in the
  # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
  #
  # The default format is `{ "type": "text" }` with no additional options.
  #
  # **Not recommended for gpt-4o and newer models:**
  #
  # Setting to `{ "type": "json_object" }` enables the older JSON mode, which
  # ensures the message the model generates is valid JSON. Using `json_schema` is
  # preferred for models that support it.
  #
  # @return [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject, nil]
  optional :format_,
           union: -> { OpenAI::Responses::ResponseFormatTextConfig },
           api_name: :format

  # @!method initialize(format_: nil)
  # Some parameter documentation has been truncated; see
  # {OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses::SamplingParams::Text}
  # for more details.
  #
  # Configuration options for a text response from the model. Can be plain text or
  # structured JSON data. Learn more:
  #
  # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
  # - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
  #
  # @param format_ [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output.
end
-
end
-
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Evals::CreateEvalJSONLRunDataSource, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource, OpenAI::Models::Evals::RunCreateResponse::DataSource::Responses)]
-
end
-
-
# Per-model token accounting for a single evaluation run.
class PerModelUsage < OpenAI::Internal::Type::BaseModel
  # @!attribute cached_tokens
  # The number of tokens retrieved from cache.
  #
  # @return [Integer]
  required :cached_tokens, Integer

  # @!attribute completion_tokens
  # The number of completion tokens generated.
  #
  # @return [Integer]
  required :completion_tokens, Integer

  # @!attribute invocation_count
  # The number of invocations.
  #
  # @return [Integer]
  required :invocation_count, Integer

  # @!attribute model_name
  # The name of the model.
  #
  # @return [String]
  required :model_name, String

  # @!attribute prompt_tokens
  # The number of prompt tokens used.
  #
  # @return [Integer]
  required :prompt_tokens, Integer

  # @!attribute total_tokens
  # The total number of tokens used.
  #
  # @return [Integer]
  required :total_tokens, Integer

  # @!method initialize(cached_tokens:, completion_tokens:, invocation_count:, model_name:, prompt_tokens:, total_tokens:)
  # @param cached_tokens [Integer] The number of tokens retrieved from cache.
  #
  # @param completion_tokens [Integer] The number of completion tokens generated.
  #
  # @param invocation_count [Integer] The number of invocations.
  #
  # @param model_name [String] The name of the model.
  #
  # @param prompt_tokens [Integer] The number of prompt tokens used.
  #
  # @param total_tokens [Integer] The total number of tokens used.
end
-
-
# Pass/fail tallies for one testing criterion of an evaluation run.
class PerTestingCriteriaResult < OpenAI::Internal::Type::BaseModel
  # @!attribute failed
  # Number of tests failed for this criteria.
  #
  # @return [Integer]
  required :failed, Integer

  # @!attribute passed
  # Number of tests passed for this criteria.
  #
  # @return [Integer]
  required :passed, Integer

  # @!attribute testing_criteria
  # A description of the testing criteria.
  #
  # @return [String]
  required :testing_criteria, String

  # @!method initialize(failed:, passed:, testing_criteria:)
  # @param failed [Integer] Number of tests failed for this criteria.
  #
  # @param passed [Integer] Number of tests passed for this criteria.
  #
  # @param testing_criteria [String] A description of the testing criteria.
end
-
-
# @see OpenAI::Models::Evals::RunCreateResponse#result_counts
class ResultCounts < OpenAI::Internal::Type::BaseModel
  # @!attribute errored
  # Number of output items that resulted in an error.
  #
  # @return [Integer]
  required :errored, Integer

  # @!attribute failed
  # Number of output items that failed to pass the evaluation.
  #
  # @return [Integer]
  required :failed, Integer

  # @!attribute passed
  # Number of output items that passed the evaluation.
  #
  # @return [Integer]
  required :passed, Integer

  # @!attribute total
  # Total number of executed output items.
  #
  # @return [Integer]
  required :total, Integer

  # @!method initialize(errored:, failed:, passed:, total:)
  # Counters summarizing the outcomes of the evaluation run.
  #
  # @param errored [Integer] Number of output items that resulted in an error.
  #
  # @param failed [Integer] Number of output items that failed to pass the evaluation.
  #
  # @param passed [Integer] Number of output items that passed the evaluation.
  #
  # @param total [Integer] Total number of executed output items.
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Evals
      # Request parameters for deleting an evaluation run.
      #
      # @see OpenAI::Resources::Evals::Runs#delete
      class RunDeleteParams < OpenAI::Internal::Type::BaseModel
        extend OpenAI::Internal::Type::RequestParameters::Converter
        include OpenAI::Internal::Type::RequestParameters

        # @!attribute eval_id
        #
        # @return [String]
        required :eval_id, String

        # @!method initialize(eval_id:, request_options: {})
        # @param eval_id [String]
        # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Evals
      # Response payload returned when an evaluation run is deleted.
      #
      # @see OpenAI::Resources::Evals::Runs#delete
      class RunDeleteResponse < OpenAI::Internal::Type::BaseModel
        # @!attribute deleted
        #
        # @return [Boolean, nil]
        optional :deleted, OpenAI::Internal::Type::Boolean

        # @!attribute object
        #
        # @return [String, nil]
        optional :object, String

        # @!attribute run_id
        #
        # @return [String, nil]
        optional :run_id, String

        # @!method initialize(deleted: nil, object: nil, run_id: nil)
        # @param deleted [Boolean]
        # @param object [String]
        # @param run_id [String]
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Evals
      # Request parameters for listing evaluation runs.
      #
      # @see OpenAI::Resources::Evals::Runs#list
      class RunListParams < OpenAI::Internal::Type::BaseModel
        extend OpenAI::Internal::Type::RequestParameters::Converter
        include OpenAI::Internal::Type::RequestParameters

        # @!attribute after
        # Identifier for the last run from the previous pagination request.
        #
        # @return [String, nil]
        optional :after, String

        # @!attribute limit
        # Number of runs to retrieve.
        #
        # @return [Integer, nil]
        optional :limit, Integer

        # @!attribute order
        # Sort order for runs by timestamp. Use `asc` for ascending order or `desc` for
        # descending order. Defaults to `asc`.
        #
        # @return [Symbol, OpenAI::Models::Evals::RunListParams::Order, nil]
        optional :order, enum: -> { OpenAI::Evals::RunListParams::Order }

        # @!attribute status
        # Filter runs by status. One of `queued` | `in_progress` | `failed` | `completed`
        # | `canceled`.
        #
        # @return [Symbol, OpenAI::Models::Evals::RunListParams::Status, nil]
        optional :status, enum: -> { OpenAI::Evals::RunListParams::Status }

        # @!method initialize(after: nil, limit: nil, order: nil, status: nil, request_options: {})
        # Some parameter documentation has been truncated; see
        # {OpenAI::Models::Evals::RunListParams} for more details.
        #
        # @param after [String] Identifier for the last run from the previous pagination request.
        #
        # @param limit [Integer] Number of runs to retrieve.
        #
        # @param order [Symbol, OpenAI::Models::Evals::RunListParams::Order] Sort order for runs by timestamp. Use `asc` for ascending order or `desc` for de
        #
        # @param status [Symbol, OpenAI::Models::Evals::RunListParams::Status] Filter runs by status. One of `queued` | `in_progress` | `failed` | `completed`
        #
        # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]

        # Sort order for runs by timestamp. Use `asc` for ascending order or `desc` for
        # descending order. Defaults to `asc`.
        module Order
          extend OpenAI::Internal::Type::Enum

          ASC = :asc
          DESC = :desc

          # @!method self.values
          # @return [Array<Symbol>]
        end

        # Filter runs by status. One of `queued` | `in_progress` | `failed` | `completed`
        # | `canceled`.
        module Status
          extend OpenAI::Internal::Type::Enum

          QUEUED = :queued
          IN_PROGRESS = :in_progress
          COMPLETED = :completed
          CANCELED = :canceled
          FAILED = :failed

          # @!method self.values
          # @return [Array<Symbol>]
        end
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Evals
-
# @see OpenAI::Resources::Evals::Runs#list
-
1
class RunListResponse < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# Unique identifier for the evaluation run.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute created_at
-
# Unix timestamp (in seconds) when the evaluation run was created.
-
#
-
# @return [Integer]
-
1
required :created_at, Integer
-
-
# @!attribute data_source
-
# Information about the run's data source.
-
#
-
# @return [OpenAI::Models::Evals::CreateEvalJSONLRunDataSource, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource, OpenAI::Models::Evals::RunListResponse::DataSource::Responses]
-
1
required :data_source, union: -> { OpenAI::Models::Evals::RunListResponse::DataSource }
-
-
# @!attribute error
-
# An object representing an error response from the Eval API.
-
#
-
# @return [OpenAI::Models::Evals::EvalAPIError]
-
1
required :error, -> { OpenAI::Evals::EvalAPIError }
-
-
# @!attribute eval_id
-
# The identifier of the associated evaluation.
-
#
-
# @return [String]
-
1
required :eval_id, String
-
-
# @!attribute metadata
-
# Set of 16 key-value pairs that can be attached to an object. This can be useful
-
# for storing additional information about the object in a structured format, and
-
# querying for objects via API or the dashboard.
-
#
-
# Keys are strings with a maximum length of 64 characters. Values are strings with
-
# a maximum length of 512 characters.
-
#
-
# @return [Hash{Symbol=>String}, nil]
-
1
required :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
-
-
# @!attribute model
-
# The model that is evaluated, if applicable.
-
#
-
# @return [String]
-
1
required :model, String
-
-
# @!attribute name
-
# The name of the evaluation run.
-
#
-
# @return [String]
-
1
required :name, String
-
-
# @!attribute object
-
# The type of the object. Always "eval.run".
-
#
-
# @return [Symbol, :"eval.run"]
-
1
required :object, const: :"eval.run"
-
-
# @!attribute per_model_usage
-
# Usage statistics for each model during the evaluation run.
-
#
-
# @return [Array<OpenAI::Models::Evals::RunListResponse::PerModelUsage>]
-
1
required :per_model_usage,
-
-> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::RunListResponse::PerModelUsage] }
-
-
# @!attribute per_testing_criteria_results
-
# Results per testing criteria applied during the evaluation run.
-
#
-
# @return [Array<OpenAI::Models::Evals::RunListResponse::PerTestingCriteriaResult>]
-
1
required :per_testing_criteria_results,
-
-> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::RunListResponse::PerTestingCriteriaResult] }
-
-
# @!attribute report_url
-
# The URL to the rendered evaluation run report on the UI dashboard.
-
#
-
# @return [String]
-
1
required :report_url, String
-
-
# @!attribute result_counts
-
# Counters summarizing the outcomes of the evaluation run.
-
#
-
# @return [OpenAI::Models::Evals::RunListResponse::ResultCounts]
-
1
required :result_counts, -> { OpenAI::Models::Evals::RunListResponse::ResultCounts }
-
-
# @!attribute status
-
# The status of the evaluation run.
-
#
-
# @return [String]
-
1
required :status, String
-
-
# @!method initialize(id:, created_at:, data_source:, error:, eval_id:, metadata:, model:, name:, per_model_usage:, per_testing_criteria_results:, report_url:, result_counts:, status:, object: :"eval.run")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Evals::RunListResponse} for more details.
-
#
-
# A schema representing an evaluation run.
-
#
-
# @param id [String] Unique identifier for the evaluation run.
-
#
-
# @param created_at [Integer] Unix timestamp (in seconds) when the evaluation run was created.
-
#
-
# @param data_source [OpenAI::Models::Evals::CreateEvalJSONLRunDataSource, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource, OpenAI::Models::Evals::RunListResponse::DataSource::Responses] Information about the run's data source.
-
#
-
# @param error [OpenAI::Models::Evals::EvalAPIError] An object representing an error response from the Eval API.
-
#
-
# @param eval_id [String] The identifier of the associated evaluation.
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
#
-
# @param model [String] The model that is evaluated, if applicable.
-
#
-
# @param name [String] The name of the evaluation run.
-
#
-
# @param per_model_usage [Array<OpenAI::Models::Evals::RunListResponse::PerModelUsage>] Usage statistics for each model during the evaluation run.
-
#
-
# @param per_testing_criteria_results [Array<OpenAI::Models::Evals::RunListResponse::PerTestingCriteriaResult>] Results per testing criteria applied during the evaluation run.
-
#
-
# @param report_url [String] The URL to the rendered evaluation run report on the UI dashboard.
-
#
-
# @param result_counts [OpenAI::Models::Evals::RunListResponse::ResultCounts] Counters summarizing the outcomes of the evaluation run.
-
#
-
# @param status [String] The status of the evaluation run.
-
#
-
# @param object [Symbol, :"eval.run"] The type of the object. Always "eval.run".
-
-
# Information about the run's data source.
-
#
-
# @see OpenAI::Models::Evals::RunListResponse#data_source
-
1
module DataSource
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
# A JsonlRunDataSource object with that specifies a JSONL file that matches the eval
-
1
variant :jsonl, -> { OpenAI::Evals::CreateEvalJSONLRunDataSource }
-
-
# A CompletionsRunDataSource object describing a model sampling configuration.
-
1
variant :completions, -> { OpenAI::Evals::CreateEvalCompletionsRunDataSource }
-
-
# A ResponsesRunDataSource object describing a model sampling configuration.
-
1
variant :responses, -> { OpenAI::Models::Evals::RunListResponse::DataSource::Responses }
-
-
1
class Responses < OpenAI::Internal::Type::BaseModel
-
# @!attribute source
-
# Determines what populates the `item` namespace in this run's data source.
-
#
-
# @return [OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::FileContent, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::FileID, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::Responses]
-
1
required :source, union: -> { OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source }
-
-
# @!attribute type
-
# The type of run data source. Always `responses`.
-
#
-
# @return [Symbol, :responses]
-
1
required :type, const: :responses
-
-
# @!attribute input_messages
-
# Used when sampling from a model. Dictates the structure of the messages passed
-
# into the model. Can either be a reference to a prebuilt trajectory (ie,
-
# `item.input_trajectory`), or a template with variable references to the `item`
-
# namespace.
-
#
-
# @return [OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::ItemReference, nil]
-
1
optional :input_messages,
-
union: -> { OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages }
-
-
# @!attribute model
-
# The name of the model to use for generating completions (e.g. "o3-mini").
-
#
-
# @return [String, nil]
-
1
optional :model, String
-
-
# @!attribute sampling_params
-
#
-
# @return [OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams, nil]
-
1
optional :sampling_params,
-
-> { OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams }
-
-
# @!method initialize(source:, input_messages: nil, model: nil, sampling_params: nil, type: :responses)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Evals::RunListResponse::DataSource::Responses} for more
-
# details.
-
#
-
# A ResponsesRunDataSource object describing a model sampling configuration.
-
#
-
# @param source [OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::FileContent, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::FileID, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::Responses] Determines what populates the `item` namespace in this run's data source.
-
#
-
# @param input_messages [OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::ItemReference] Used when sampling from a model. Dictates the structure of the messages passed i
-
#
-
# @param model [String] The name of the model to use for generating completions (e.g. "o3-mini").
-
#
-
# @param sampling_params [OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams]
-
#
-
# @param type [Symbol, :responses] The type of run data source. Always `responses`.
-
-
# Determines what populates the `item` namespace in this run's data source.
#
# @see OpenAI::Models::Evals::RunListResponse::DataSource::Responses#source
module Source
  extend OpenAI::Internal::Type::Union

  discriminator :type

  variant :file_content,
          -> { OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::FileContent }

  variant :file_id,
          -> { OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::FileID }

  # A EvalResponsesSource object describing a run data source configuration.
  variant :responses,
          -> { OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::Responses }

  class FileContent < OpenAI::Internal::Type::BaseModel
    # @!attribute content
    # The content of the jsonl file.
    #
    # @return [Array<OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::FileContent::Content>]
    required :content,
             -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::FileContent::Content] }

    # @!attribute type
    # The type of jsonl source. Always `file_content`.
    #
    # @return [Symbol, :file_content]
    required :type, const: :file_content

    # @!method initialize(content:, type: :file_content)
    # @param content [Array<OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::FileContent::Content>] The content of the jsonl file.
    #
    # @param type [Symbol, :file_content] The type of jsonl source. Always `file_content`.

    class Content < OpenAI::Internal::Type::BaseModel
      # @!attribute item
      #
      # @return [Hash{Symbol=>Object}]
      required :item, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]

      # @!attribute sample
      #
      # @return [Hash{Symbol=>Object}, nil]
      optional :sample, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]

      # @!method initialize(item:, sample: nil)
      # @param item [Hash{Symbol=>Object}]
      # @param sample [Hash{Symbol=>Object}]
    end
  end

  class FileID < OpenAI::Internal::Type::BaseModel
    # @!attribute id
    # The identifier of the file.
    #
    # @return [String]
    required :id, String

    # @!attribute type
    # The type of jsonl source. Always `file_id`.
    #
    # @return [Symbol, :file_id]
    required :type, const: :file_id

    # @!method initialize(id:, type: :file_id)
    # @param id [String] The identifier of the file.
    #
    # @param type [Symbol, :file_id] The type of jsonl source. Always `file_id`.
  end

  class Responses < OpenAI::Internal::Type::BaseModel
    # @!attribute type
    # The type of run data source. Always `responses`.
    #
    # @return [Symbol, :responses]
    required :type, const: :responses

    # @!attribute created_after
    # Only include items created after this timestamp (inclusive). This is a query
    # parameter used to select responses.
    #
    # @return [Integer, nil]
    optional :created_after, Integer, nil?: true

    # @!attribute created_before
    # Only include items created before this timestamp (inclusive). This is a query
    # parameter used to select responses.
    #
    # @return [Integer, nil]
    optional :created_before, Integer, nil?: true

    # @!attribute instructions_search
    # Optional string to search the 'instructions' field. This is a query parameter
    # used to select responses.
    #
    # @return [String, nil]
    optional :instructions_search, String, nil?: true

    # @!attribute metadata
    # Metadata filter for the responses. This is a query parameter used to select
    # responses.
    #
    # @return [Object, nil]
    optional :metadata, OpenAI::Internal::Type::Unknown, nil?: true

    # @!attribute model
    # The name of the model to find responses for. This is a query parameter used to
    # select responses.
    #
    # @return [String, nil]
    optional :model, String, nil?: true

    # @!attribute reasoning_effort
    # Optional reasoning effort parameter. This is a query parameter used to select
    # responses.
    #
    # @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
    optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true

    # @!attribute temperature
    # Sampling temperature. This is a query parameter used to select responses.
    #
    # @return [Float, nil]
    optional :temperature, Float, nil?: true

    # @!attribute tools
    # List of tool names. This is a query parameter used to select responses.
    #
    # @return [Array<String>, nil]
    optional :tools, OpenAI::Internal::Type::ArrayOf[String], nil?: true

    # @!attribute top_p
    # Nucleus sampling parameter. This is a query parameter used to select responses.
    #
    # @return [Float, nil]
    optional :top_p, Float, nil?: true

    # @!attribute users
    # List of user identifiers. This is a query parameter used to select responses.
    #
    # @return [Array<String>, nil]
    optional :users, OpenAI::Internal::Type::ArrayOf[String], nil?: true

    # @!method initialize(created_after: nil, created_before: nil, instructions_search: nil, metadata: nil, model: nil, reasoning_effort: nil, temperature: nil, tools: nil, top_p: nil, users: nil, type: :responses)
    # Some parameter documentation has been truncated; see
    # {OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::Responses}
    # for more details.
    #
    # A EvalResponsesSource object describing a run data source configuration.
    #
    # @param created_after [Integer, nil] Only include items created after this timestamp (inclusive). This is a query par
    #
    # @param created_before [Integer, nil] Only include items created before this timestamp (inclusive). This is a query pa
    #
    # @param instructions_search [String, nil] Optional string to search the 'instructions' field. This is a query parameter us
    #
    # @param metadata [Object, nil] Metadata filter for the responses. This is a query parameter used to select resp
    #
    # @param model [String, nil] The name of the model to find responses for. This is a query parameter used to s
    #
    # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Optional reasoning effort parameter. This is a query parameter used to select re
    #
    # @param temperature [Float, nil] Sampling temperature. This is a query parameter used to select responses.
    #
    # @param tools [Array<String>, nil] List of tool names. This is a query parameter used to select responses.
    #
    # @param top_p [Float, nil] Nucleus sampling parameter. This is a query parameter used to select responses.
    #
    # @param users [Array<String>, nil] List of user identifiers. This is a query parameter used to select responses.
    #
    # @param type [Symbol, :responses] The type of run data source. Always `responses`.
  end

  # @!method self.variants
  # @return [Array(OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::FileContent, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::FileID, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::Source::Responses)]
end
-
-
# Used when sampling from a model. Dictates the structure of the messages passed
-
# into the model. Can either be a reference to a prebuilt trajectory (ie,
-
# `item.input_trajectory`), or a template with variable references to the `item`
-
# namespace.
-
#
-
# @see OpenAI::Models::Evals::RunListResponse::DataSource::Responses#input_messages
-
1
module InputMessages
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
1
variant :template,
-
-> { OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template }
-
-
1
variant :item_reference,
-
-> { OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::ItemReference }
-
-
1
class Template < OpenAI::Internal::Type::BaseModel
-
# @!attribute template
-
# A list of chat messages forming the prompt or context. May include variable
-
# references to the `item` namespace, ie {{item.name}}.
-
#
-
# @return [Array<OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::ChatMessage, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem>]
-
1
required :template,
-
-> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template] }
-
-
# @!attribute type
-
# The type of input messages. Always `template`.
-
#
-
# @return [Symbol, :template]
-
1
required :type, const: :template
-
-
# @!method initialize(template:, type: :template)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template}
-
# for more details.
-
#
-
# @param template [Array<OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::ChatMessage, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem>] A list of chat messages forming the prompt or context. May include variable refe
-
#
-
# @param type [Symbol, :template] The type of input messages. Always `template`.
-
-
# A message input to the model with a role indicating instruction following
-
# hierarchy. Instructions given with the `developer` or `system` role take
-
# precedence over instructions given with the `user` role. Messages with the
-
# `assistant` role are presumed to have been generated by the model in previous
-
# interactions.
-
1
module Template
-
1
extend OpenAI::Internal::Type::Union
-
-
1
variant -> { OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::ChatMessage }
-
-
# A message input to the model with a role indicating instruction following
-
# hierarchy. Instructions given with the `developer` or `system` role take
-
# precedence over instructions given with the `user` role. Messages with the
-
# `assistant` role are presumed to have been generated by the model in previous
-
# interactions.
-
1
variant -> { OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem }
-
-
# A simple role/content chat message used inside an input-messages template.
class ChatMessage < OpenAI::Internal::Type::BaseModel
  # @!attribute content
  # The content of the message.
  #
  # @return [String]
  required :content, String

  # @!attribute role
  # The role of the message (e.g. "system", "assistant", "user").
  #
  # @return [String]
  required :role, String

  # @!method initialize(content:, role:)
  # @param content [String] The content of the message.
  #
  # @param role [String] The role of the message (e.g. "system", "assistant", "user").
end
-
-
1
class EvalItem < OpenAI::Internal::Type::BaseModel
-
# @!attribute content
-
# Inputs to the model - can contain template strings.
-
#
-
# @return [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage, Array<Object>]
-
1
required :content,
-
union: -> { OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content }
-
-
# @!attribute role
-
# The role of the message input. One of `user`, `assistant`, `system`, or
-
# `developer`.
-
#
-
# @return [Symbol, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role]
-
1
required :role,
-
enum: -> { OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role }
-
-
# @!attribute type
-
# The type of the message input. Always `message`.
-
#
-
# @return [Symbol, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type, nil]
-
1
optional :type,
-
enum: -> { OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type }
-
-
# @!method initialize(content:, role:, type: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem}
-
# for more details.
-
#
-
# A message input to the model with a role indicating instruction following
-
# hierarchy. Instructions given with the `developer` or `system` role take
-
# precedence over instructions given with the `user` role. Messages with the
-
# `assistant` role are presumed to have been generated by the model in previous
-
# interactions.
-
#
-
# @param content [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage, Array<Object>] Inputs to the model - can contain template strings.
-
#
-
# @param role [Symbol, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role] The role of the message input. One of `user`, `assistant`, `system`, or
-
#
-
# @param type [Symbol, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type] The type of the message input. Always `message`.
-
-
# Inputs to the model - can contain template strings.
-
#
-
# @see OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem#content
-
1
module Content
-
1
extend OpenAI::Internal::Type::Union
-
-
# A text input to the model.
-
1
variant String
-
-
# A text input to the model.
-
1
variant -> { OpenAI::Responses::ResponseInputText }
-
-
# A text output from the model.
-
1
variant -> { OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText }
-
-
# An image input to the model.
-
1
variant -> { OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage }
-
-
# A list of inputs, each of which may be either an input text or input image object.
-
1
variant -> { OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::AnArrayOfInputTextAndInputImageArray }
-
-
# A text output from the model.
class OutputText < OpenAI::Internal::Type::BaseModel
  # @!attribute text
  # The text output from the model.
  #
  # @return [String]
  required :text, String

  # @!attribute type
  # The type of the output text. Always `output_text`.
  #
  # @return [Symbol, :output_text]
  required :type, const: :output_text

  # @!method initialize(text:, type: :output_text)
  # Some parameter documentation has been truncated; see
  # {OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText}
  # for more details.
  #
  # A text output from the model.
  #
  # @param text [String] The text output from the model.
  #
  # @param type [Symbol, :output_text] The type of the output text. Always `output_text`.
end
-
-
1
class InputImage < OpenAI::Internal::Type::BaseModel
-
# @!attribute image_url
-
# The URL of the image input.
-
#
-
# @return [String]
-
1
required :image_url, String
-
-
# @!attribute type
-
# The type of the image input. Always `input_image`.
-
#
-
# @return [Symbol, :input_image]
-
1
required :type, const: :input_image
-
-
# @!attribute detail
-
# The detail level of the image to be sent to the model. One of `high`, `low`, or
-
# `auto`. Defaults to `auto`.
-
#
-
# @return [String, nil]
-
1
optional :detail, String
-
-
# @!method initialize(image_url:, detail: nil, type: :input_image)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage}
-
# for more details.
-
#
-
# An image input to the model.
-
#
-
# @param image_url [String] The URL of the image input.
-
#
-
# @param detail [String] The detail level of the image to be sent to the model. One of `high`, `low`, or
-
#
-
# @param type [Symbol, :input_image] The type of the image input. Always `input_image`.
-
end
-
-
# @!method self.variants
-
# @return [Array(String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage, Array<Object>)]
-
-
# @type [OpenAI::Internal::Type::Converter]
-
1
AnArrayOfInputTextAndInputImageArray = OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::Unknown]
-
end
-
-
# The role of the message input. One of `user`, `assistant`, `system`, or
-
# `developer`.
-
#
-
# @see OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem#role
-
1
module Role
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
USER = :user
-
1
ASSISTANT = :assistant
-
1
SYSTEM = :system
-
1
DEVELOPER = :developer
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
# The type of the message input. Always `message`.
-
#
-
# @see OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem#type
-
1
module Type
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
MESSAGE = :message
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::ChatMessage, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem)]
-
end
-
end
-
-
1
class ItemReference < OpenAI::Internal::Type::BaseModel
-
# @!attribute item_reference
-
# A reference to a variable in the `item` namespace. Ie, "item.name"
-
#
-
# @return [String]
-
1
required :item_reference, String
-
-
# @!attribute type
-
# The type of input messages. Always `item_reference`.
-
#
-
# @return [Symbol, :item_reference]
-
1
required :type, const: :item_reference
-
-
# @!method initialize(item_reference:, type: :item_reference)
-
# @param item_reference [String] A reference to a variable in the `item` namespace. Ie, "item.name"
-
#
-
# @param type [Symbol, :item_reference] The type of input messages. Always `item_reference`.
-
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::Template, OpenAI::Models::Evals::RunListResponse::DataSource::Responses::InputMessages::ItemReference)]
-
end
-
-
# @see OpenAI::Models::Evals::RunListResponse::DataSource::Responses#sampling_params
-
1
class SamplingParams < OpenAI::Internal::Type::BaseModel
-
# @!attribute max_completion_tokens
-
# The maximum number of tokens in the generated output.
-
#
-
# @return [Integer, nil]
-
1
optional :max_completion_tokens, Integer
-
-
# @!attribute seed
-
# A seed value to initialize the randomness, during sampling.
-
#
-
# @return [Integer, nil]
-
1
optional :seed, Integer
-
-
# @!attribute temperature
-
# A higher temperature increases randomness in the outputs.
-
#
-
# @return [Float, nil]
-
1
optional :temperature, Float
-
-
# @!attribute text
-
# Configuration options for a text response from the model. Can be plain text or
-
# structured JSON data. Learn more:
-
#
-
# - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
-
# - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
-
#
-
# @return [OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams::Text, nil]
-
1
optional :text, -> { OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams::Text }
-
-
# @!attribute tools
-
# An array of tools the model may call while generating a response. You can
-
# specify which tool to use by setting the `tool_choice` parameter.
-
#
-
# The two categories of tools you can provide the model are:
-
#
-
# - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
-
# capabilities, like
-
# [web search](https://platform.openai.com/docs/guides/tools-web-search) or
-
# [file search](https://platform.openai.com/docs/guides/tools-file-search).
-
# Learn more about
-
# [built-in tools](https://platform.openai.com/docs/guides/tools).
-
# - **Function calls (custom tools)**: Functions that are defined by you, enabling
-
# the model to call your own code. Learn more about
-
# [function calling](https://platform.openai.com/docs/guides/function-calling).
-
#
-
# @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>, nil]
-
1
optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] }
-
-
# @!attribute top_p
-
# An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
-
#
-
# @return [Float, nil]
-
1
optional :top_p, Float
-
-
# @!method initialize(max_completion_tokens: nil, seed: nil, temperature: nil, text: nil, tools: nil, top_p: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams}
-
# for more details.
-
#
-
# @param max_completion_tokens [Integer] The maximum number of tokens in the generated output.
-
#
-
# @param seed [Integer] A seed value to initialize the randomness, during sampling.
-
#
-
# @param temperature [Float] A higher temperature increases randomness in the outputs.
-
#
-
# @param text [OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams::Text] Configuration options for a text response from the model. Can be plain
-
#
-
# @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
-
#
-
# @param top_p [Float] An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
-
-
# @see OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams#text
-
1
class Text < OpenAI::Internal::Type::BaseModel
-
# @!attribute format_
-
# An object specifying the format that the model must output.
-
#
-
# Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
-
# ensures the model will match your supplied JSON schema. Learn more in the
-
# [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
-
#
-
# The default format is `{ "type": "text" }` with no additional options.
-
#
-
# **Not recommended for gpt-4o and newer models:**
-
#
-
# Setting to `{ "type": "json_object" }` enables the older JSON mode, which
-
# ensures the message the model generates is valid JSON. Using `json_schema` is
-
# preferred for models that support it.
-
#
-
# @return [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject, nil]
-
1
optional :format_,
-
union: -> {
-
OpenAI::Responses::ResponseFormatTextConfig
-
},
-
api_name: :format
-
-
# @!method initialize(format_: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Evals::RunListResponse::DataSource::Responses::SamplingParams::Text}
-
# for more details.
-
#
-
# Configuration options for a text response from the model. Can be plain text or
-
# structured JSON data. Learn more:
-
#
-
# - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
-
# - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
-
#
-
# @param format_ [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output.
-
end
-
end
-
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Evals::CreateEvalJSONLRunDataSource, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource, OpenAI::Models::Evals::RunListResponse::DataSource::Responses)]
-
end
-
-
1
class PerModelUsage < OpenAI::Internal::Type::BaseModel
-
# @!attribute cached_tokens
-
# The number of tokens retrieved from cache.
-
#
-
# @return [Integer]
-
1
required :cached_tokens, Integer
-
-
# @!attribute completion_tokens
-
# The number of completion tokens generated.
-
#
-
# @return [Integer]
-
1
required :completion_tokens, Integer
-
-
# @!attribute invocation_count
-
# The number of invocations.
-
#
-
# @return [Integer]
-
1
required :invocation_count, Integer
-
-
# @!attribute model_name
-
# The name of the model.
-
#
-
# @return [String]
-
1
required :model_name, String
-
-
# @!attribute prompt_tokens
-
# The number of prompt tokens used.
-
#
-
# @return [Integer]
-
1
required :prompt_tokens, Integer
-
-
# @!attribute total_tokens
-
# The total number of tokens used.
-
#
-
# @return [Integer]
-
1
required :total_tokens, Integer
-
-
# @!method initialize(cached_tokens:, completion_tokens:, invocation_count:, model_name:, prompt_tokens:, total_tokens:)
-
# @param cached_tokens [Integer] The number of tokens retrieved from cache.
-
#
-
# @param completion_tokens [Integer] The number of completion tokens generated.
-
#
-
# @param invocation_count [Integer] The number of invocations.
-
#
-
# @param model_name [String] The name of the model.
-
#
-
# @param prompt_tokens [Integer] The number of prompt tokens used.
-
#
-
# @param total_tokens [Integer] The total number of tokens used.
-
end
-
-
1
class PerTestingCriteriaResult < OpenAI::Internal::Type::BaseModel
-
# @!attribute failed
-
# Number of tests failed for this criteria.
-
#
-
# @return [Integer]
-
1
required :failed, Integer
-
-
# @!attribute passed
-
# Number of tests passed for this criteria.
-
#
-
# @return [Integer]
-
1
required :passed, Integer
-
-
# @!attribute testing_criteria
-
# A description of the testing criteria.
-
#
-
# @return [String]
-
1
required :testing_criteria, String
-
-
# @!method initialize(failed:, passed:, testing_criteria:)
-
# @param failed [Integer] Number of tests failed for this criteria.
-
#
-
# @param passed [Integer] Number of tests passed for this criteria.
-
#
-
# @param testing_criteria [String] A description of the testing criteria.
-
end
-
-
# @see OpenAI::Models::Evals::RunListResponse#result_counts
-
1
class ResultCounts < OpenAI::Internal::Type::BaseModel
-
# @!attribute errored
-
# Number of output items that resulted in an error.
-
#
-
# @return [Integer]
-
1
required :errored, Integer
-
-
# @!attribute failed
-
# Number of output items that failed to pass the evaluation.
-
#
-
# @return [Integer]
-
1
required :failed, Integer
-
-
# @!attribute passed
-
# Number of output items that passed the evaluation.
-
#
-
# @return [Integer]
-
1
required :passed, Integer
-
-
# @!attribute total
-
# Total number of executed output items.
-
#
-
# @return [Integer]
-
1
required :total, Integer
-
-
# @!method initialize(errored:, failed:, passed:, total:)
-
# Counters summarizing the outcomes of the evaluation run.
-
#
-
# @param errored [Integer] Number of output items that resulted in an error.
-
#
-
# @param failed [Integer] Number of output items that failed to pass the evaluation.
-
#
-
# @param passed [Integer] Number of output items that passed the evaluation.
-
#
-
# @param total [Integer] Total number of executed output items.
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Evals
-
# @see OpenAI::Resources::Evals::Runs#retrieve
-
1
class RunRetrieveParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!attribute eval_id
-
#
-
# @return [String]
-
1
required :eval_id, String
-
-
# @!method initialize(eval_id:, request_options: {})
-
# @param eval_id [String]
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Evals
-
# @see OpenAI::Resources::Evals::Runs#retrieve
-
1
class RunRetrieveResponse < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# Unique identifier for the evaluation run.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute created_at
-
# Unix timestamp (in seconds) when the evaluation run was created.
-
#
-
# @return [Integer]
-
1
required :created_at, Integer
-
-
# @!attribute data_source
-
# Information about the run's data source.
-
#
-
# @return [OpenAI::Models::Evals::CreateEvalJSONLRunDataSource, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses]
-
1
required :data_source, union: -> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource }
-
-
# @!attribute error
-
# An object representing an error response from the Eval API.
-
#
-
# @return [OpenAI::Models::Evals::EvalAPIError]
-
1
required :error, -> { OpenAI::Evals::EvalAPIError }
-
-
# @!attribute eval_id
-
# The identifier of the associated evaluation.
-
#
-
# @return [String]
-
1
required :eval_id, String
-
-
# @!attribute metadata
-
# Set of 16 key-value pairs that can be attached to an object. This can be useful
-
# for storing additional information about the object in a structured format, and
-
# querying for objects via API or the dashboard.
-
#
-
# Keys are strings with a maximum length of 64 characters. Values are strings with
-
# a maximum length of 512 characters.
-
#
-
# @return [Hash{Symbol=>String}, nil]
-
1
required :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
-
-
# @!attribute model
-
# The model that is evaluated, if applicable.
-
#
-
# @return [String]
-
1
required :model, String
-
-
# @!attribute name
-
# The name of the evaluation run.
-
#
-
# @return [String]
-
1
required :name, String
-
-
# @!attribute object
-
# The type of the object. Always "eval.run".
-
#
-
# @return [Symbol, :"eval.run"]
-
1
required :object, const: :"eval.run"
-
-
# @!attribute per_model_usage
-
# Usage statistics for each model during the evaluation run.
-
#
-
# @return [Array<OpenAI::Models::Evals::RunRetrieveResponse::PerModelUsage>]
-
1
required :per_model_usage,
-
-> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::RunRetrieveResponse::PerModelUsage] }
-
-
# @!attribute per_testing_criteria_results
-
# Results per testing criteria applied during the evaluation run.
-
#
-
# @return [Array<OpenAI::Models::Evals::RunRetrieveResponse::PerTestingCriteriaResult>]
-
1
required :per_testing_criteria_results,
-
-> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::RunRetrieveResponse::PerTestingCriteriaResult] }
-
-
# @!attribute report_url
-
# The URL to the rendered evaluation run report on the UI dashboard.
-
#
-
# @return [String]
-
1
required :report_url, String
-
-
# @!attribute result_counts
-
# Counters summarizing the outcomes of the evaluation run.
-
#
-
# @return [OpenAI::Models::Evals::RunRetrieveResponse::ResultCounts]
-
1
required :result_counts, -> { OpenAI::Models::Evals::RunRetrieveResponse::ResultCounts }
-
-
# @!attribute status
-
# The status of the evaluation run.
-
#
-
# @return [String]
-
1
required :status, String
-
-
# @!method initialize(id:, created_at:, data_source:, error:, eval_id:, metadata:, model:, name:, per_model_usage:, per_testing_criteria_results:, report_url:, result_counts:, status:, object: :"eval.run")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Evals::RunRetrieveResponse} for more details.
-
#
-
# A schema representing an evaluation run.
-
#
-
# @param id [String] Unique identifier for the evaluation run.
-
#
-
# @param created_at [Integer] Unix timestamp (in seconds) when the evaluation run was created.
-
#
-
# @param data_source [OpenAI::Models::Evals::CreateEvalJSONLRunDataSource, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses] Information about the run's data source.
-
#
-
# @param error [OpenAI::Models::Evals::EvalAPIError] An object representing an error response from the Eval API.
-
#
-
# @param eval_id [String] The identifier of the associated evaluation.
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
#
-
# @param model [String] The model that is evaluated, if applicable.
-
#
-
# @param name [String] The name of the evaluation run.
-
#
-
# @param per_model_usage [Array<OpenAI::Models::Evals::RunRetrieveResponse::PerModelUsage>] Usage statistics for each model during the evaluation run.
-
#
-
# @param per_testing_criteria_results [Array<OpenAI::Models::Evals::RunRetrieveResponse::PerTestingCriteriaResult>] Results per testing criteria applied during the evaluation run.
-
#
-
# @param report_url [String] The URL to the rendered evaluation run report on the UI dashboard.
-
#
-
# @param result_counts [OpenAI::Models::Evals::RunRetrieveResponse::ResultCounts] Counters summarizing the outcomes of the evaluation run.
-
#
-
# @param status [String] The status of the evaluation run.
-
#
-
# @param object [Symbol, :"eval.run"] The type of the object. Always "eval.run".
-
-
# Information about the run's data source.
-
#
-
# @see OpenAI::Models::Evals::RunRetrieveResponse#data_source
-
1
module DataSource
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
# A JsonlRunDataSource object with that specifies a JSONL file that matches the eval
-
1
variant :jsonl, -> { OpenAI::Evals::CreateEvalJSONLRunDataSource }
-
-
# A CompletionsRunDataSource object describing a model sampling configuration.
-
1
variant :completions, -> { OpenAI::Evals::CreateEvalCompletionsRunDataSource }
-
-
# A ResponsesRunDataSource object describing a model sampling configuration.
-
1
variant :responses, -> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses }
-
-
1
class Responses < OpenAI::Internal::Type::BaseModel
-
# @!attribute source
-
# Determines what populates the `item` namespace in this run's data source.
-
#
-
# @return [OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::FileContent, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::FileID, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::Responses]
-
1
required :source, union: -> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source }
-
-
# @!attribute type
-
# The type of run data source. Always `responses`.
-
#
-
# @return [Symbol, :responses]
-
1
required :type, const: :responses
-
-
# @!attribute input_messages
-
# Used when sampling from a model. Dictates the structure of the messages passed
-
# into the model. Can either be a reference to a prebuilt trajectory (ie,
-
# `item.input_trajectory`), or a template with variable references to the `item`
-
# namespace.
-
#
-
# @return [OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::ItemReference, nil]
-
1
optional :input_messages,
-
union: -> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages }
-
-
# @!attribute model
-
# The name of the model to use for generating completions (e.g. "o3-mini").
-
#
-
# @return [String, nil]
-
1
optional :model, String
-
-
# @!attribute sampling_params
-
#
-
# @return [OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams, nil]
-
1
optional :sampling_params,
-
-> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams }
-
-
# @!method initialize(source:, input_messages: nil, model: nil, sampling_params: nil, type: :responses)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses} for more
-
# details.
-
#
-
# A ResponsesRunDataSource object describing a model sampling configuration.
-
#
-
# @param source [OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::FileContent, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::FileID, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::Responses] Determines what populates the `item` namespace in this run's data source.
-
#
-
# @param input_messages [OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::ItemReference] Used when sampling from a model. Dictates the structure of the messages passed i
-
#
-
# @param model [String] The name of the model to use for generating completions (e.g. "o3-mini").
-
#
-
# @param sampling_params [OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams]
-
#
-
# @param type [Symbol, :responses] The type of run data source. Always `responses`.
-
-
# Determines what populates the `item` namespace in this run's data source.
-
#
-
# @see OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses#source
-
1
module Source
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
1
variant :file_content,
-
-> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::FileContent }
-
-
1
variant :file_id, -> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::FileID }
-
-
# A EvalResponsesSource object describing a run data source configuration.
-
1
variant :responses,
-
-> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::Responses }
-
-
1
class FileContent < OpenAI::Internal::Type::BaseModel
-
# @!attribute content
-
# The content of the jsonl file.
-
#
-
# @return [Array<OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::FileContent::Content>]
-
1
required :content,
-
-> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::FileContent::Content] }
-
-
# @!attribute type
-
# The type of jsonl source. Always `file_content`.
-
#
-
# @return [Symbol, :file_content]
-
1
required :type, const: :file_content
-
-
# @!method initialize(content:, type: :file_content)
-
# @param content [Array<OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::FileContent::Content>] The content of the jsonl file.
-
#
-
# @param type [Symbol, :file_content] The type of jsonl source. Always `file_content`.
-
-
1
class Content < OpenAI::Internal::Type::BaseModel
-
# @!attribute item
-
#
-
# @return [Hash{Symbol=>Object}]
-
1
required :item, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]
-
-
# @!attribute sample
-
#
-
# @return [Hash{Symbol=>Object}, nil]
-
1
optional :sample, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]
-
-
# @!method initialize(item:, sample: nil)
-
# @param item [Hash{Symbol=>Object}]
-
# @param sample [Hash{Symbol=>Object}]
-
end
-
end
-
-
1
class FileID < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The identifier of the file.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute type
-
# The type of jsonl source. Always `file_id`.
-
#
-
# @return [Symbol, :file_id]
-
1
required :type, const: :file_id
-
-
# @!method initialize(id:, type: :file_id)
-
# @param id [String] The identifier of the file.
-
#
-
# @param type [Symbol, :file_id] The type of jsonl source. Always `file_id`.
-
end
-
-
1
class Responses < OpenAI::Internal::Type::BaseModel
-
# @!attribute type
-
# The type of run data source. Always `responses`.
-
#
-
# @return [Symbol, :responses]
-
1
required :type, const: :responses
-
-
# @!attribute created_after
-
# Only include items created after this timestamp (inclusive). This is a query
-
# parameter used to select responses.
-
#
-
# @return [Integer, nil]
-
1
optional :created_after, Integer, nil?: true
-
-
# @!attribute created_before
-
# Only include items created before this timestamp (inclusive). This is a query
-
# parameter used to select responses.
-
#
-
# @return [Integer, nil]
-
1
optional :created_before, Integer, nil?: true
-
-
# @!attribute instructions_search
-
# Optional string to search the 'instructions' field. This is a query parameter
-
# used to select responses.
-
#
-
# @return [String, nil]
-
1
optional :instructions_search, String, nil?: true
-
-
# @!attribute metadata
-
# Metadata filter for the responses. This is a query parameter used to select
-
# responses.
-
#
-
# @return [Object, nil]
-
1
optional :metadata, OpenAI::Internal::Type::Unknown, nil?: true
-
-
# @!attribute model
-
# The name of the model to find responses for. This is a query parameter used to
-
# select responses.
-
#
-
# @return [String, nil]
-
1
optional :model, String, nil?: true
-
-
# @!attribute reasoning_effort
-
# Optional reasoning effort parameter. This is a query parameter used to select
-
# responses.
-
#
-
# @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
-
1
optional :reasoning_effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
-
-
# @!attribute temperature
-
# Sampling temperature. This is a query parameter used to select responses.
-
#
-
# @return [Float, nil]
-
1
optional :temperature, Float, nil?: true
-
-
# @!attribute tools
-
# List of tool names. This is a query parameter used to select responses.
-
#
-
# @return [Array<String>, nil]
-
1
optional :tools, OpenAI::Internal::Type::ArrayOf[String], nil?: true
-
-
# @!attribute top_p
-
# Nucleus sampling parameter. This is a query parameter used to select responses.
-
#
-
# @return [Float, nil]
-
1
optional :top_p, Float, nil?: true
-
-
# @!attribute users
-
# List of user identifiers. This is a query parameter used to select responses.
-
#
-
# @return [Array<String>, nil]
-
1
optional :users, OpenAI::Internal::Type::ArrayOf[String], nil?: true
-
-
# @!method initialize(created_after: nil, created_before: nil, instructions_search: nil, metadata: nil, model: nil, reasoning_effort: nil, temperature: nil, tools: nil, top_p: nil, users: nil, type: :responses)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::Responses}
-
# for more details.
-
#
-
# A EvalResponsesSource object describing a run data source configuration.
-
#
-
# @param created_after [Integer, nil] Only include items created after this timestamp (inclusive). This is a query par
-
#
-
# @param created_before [Integer, nil] Only include items created before this timestamp (inclusive). This is a query pa
-
#
-
# @param instructions_search [String, nil] Optional string to search the 'instructions' field. This is a query parameter us
-
#
-
# @param metadata [Object, nil] Metadata filter for the responses. This is a query parameter used to select resp
-
#
-
# @param model [String, nil] The name of the model to find responses for. This is a query parameter used to s
-
#
-
# @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] Optional reasoning effort parameter. This is a query parameter used to select re
-
#
-
# @param temperature [Float, nil] Sampling temperature. This is a query parameter used to select responses.
-
#
-
# @param tools [Array<String>, nil] List of tool names. This is a query parameter used to select responses.
-
#
-
# @param top_p [Float, nil] Nucleus sampling parameter. This is a query parameter used to select responses.
-
#
-
# @param users [Array<String>, nil] List of user identifiers. This is a query parameter used to select responses.
-
#
-
# @param type [Symbol, :responses] The type of run data source. Always `responses`.
-
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::FileContent, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::FileID, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::Source::Responses)]
-
end
-
-
# Used when sampling from a model. Dictates the structure of the messages passed
-
# into the model. Can either be a reference to a prebuilt trajectory (ie,
-
# `item.input_trajectory`), or a template with variable references to the `item`
-
# namespace.
-
#
-
# @see OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses#input_messages
-
1
module InputMessages
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
1
variant :template,
-
-> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template }
-
-
1
variant :item_reference,
-
-> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::ItemReference }
-
-
1
class Template < OpenAI::Internal::Type::BaseModel
-
# @!attribute template
-
# A list of chat messages forming the prompt or context. May include variable
-
# references to the `item` namespace, ie {{item.name}}.
-
#
-
# @return [Array<OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::ChatMessage, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem>]
-
1
required :template,
-
-> do
-
OpenAI::Internal::Type::ArrayOf[
-
union: OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template
-
]
-
end
-
-
# @!attribute type
-
# The type of input messages. Always `template`.
-
#
-
# @return [Symbol, :template]
-
1
required :type, const: :template
-
-
# @!method initialize(template:, type: :template)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template}
-
# for more details.
-
#
-
# @param template [Array<OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::ChatMessage, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem>] A list of chat messages forming the prompt or context. May include variable refe
-
#
-
# @param type [Symbol, :template] The type of input messages. Always `template`.
-
-
# A message input to the model with a role indicating instruction following
-
# hierarchy. Instructions given with the `developer` or `system` role take
-
# precedence over instructions given with the `user` role. Messages with the
-
# `assistant` role are presumed to have been generated by the model in previous
-
# interactions.
-
1
module Template
-
1
extend OpenAI::Internal::Type::Union
-
-
1
variant -> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::ChatMessage }
-
-
# A message input to the model with a role indicating instruction following
-
# hierarchy. Instructions given with the `developer` or `system` role take
-
# precedence over instructions given with the `user` role. Messages with the
-
# `assistant` role are presumed to have been generated by the model in previous
-
# interactions.
-
1
variant -> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem }
-
-
1
class ChatMessage < OpenAI::Internal::Type::BaseModel
-
# @!attribute content
-
# The content of the message.
-
#
-
# @return [String]
-
1
required :content, String
-
-
# @!attribute role
-
# The role of the message (e.g. "system", "assistant", "user").
-
#
-
# @return [String]
-
1
required :role, String
-
-
# @!method initialize(content:, role:)
-
# @param content [String] The content of the message.
-
#
-
# @param role [String] The role of the message (e.g. "system", "assistant", "user").
-
end
-
-
1
class EvalItem < OpenAI::Internal::Type::BaseModel
-
# @!attribute content
-
# Inputs to the model - can contain template strings.
-
#
-
# @return [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage, Array<Object>]
-
1
required :content,
-
union: -> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content }
-
-
# @!attribute role
-
# The role of the message input. One of `user`, `assistant`, `system`, or
-
# `developer`.
-
#
-
# @return [Symbol, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role]
-
1
required :role,
-
enum: -> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role }
-
-
# @!attribute type
-
# The type of the message input. Always `message`.
-
#
-
# @return [Symbol, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type, nil]
-
1
optional :type,
-
enum: -> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type }
-
-
# @!method initialize(content:, role:, type: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem}
-
# for more details.
-
#
-
# A message input to the model with a role indicating instruction following
-
# hierarchy. Instructions given with the `developer` or `system` role take
-
# precedence over instructions given with the `user` role. Messages with the
-
# `assistant` role are presumed to have been generated by the model in previous
-
# interactions.
-
#
-
# @param content [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage, Array<Object>] Inputs to the model - can contain template strings.
-
#
-
# @param role [Symbol, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Role] The role of the message input. One of `user`, `assistant`, `system`, or
-
#
-
# @param type [Symbol, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Type] The type of the message input. Always `message`.
-
-
# Inputs to the model - can contain template strings.
-
#
-
# @see OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem#content
-
1
module Content
-
1
extend OpenAI::Internal::Type::Union
-
-
# A text input to the model.
-
1
variant String
-
-
# A text input to the model.
-
1
variant -> { OpenAI::Responses::ResponseInputText }
-
-
# A text output from the model.
-
1
variant -> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText }
-
-
# An image input to the model.
-
1
variant -> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage }
-
-
# A list of inputs, each of which may be either an input text or input image object.
-
1
variant -> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::AnArrayOfInputTextAndInputImageArray }
-
-
1
class OutputText < OpenAI::Internal::Type::BaseModel
-
# @!attribute text
-
# The text output from the model.
-
#
-
# @return [String]
-
1
required :text, String
-
-
# @!attribute type
-
# The type of the output text. Always `output_text`.
-
#
-
# @return [Symbol, :output_text]
-
1
required :type, const: :output_text
-
-
# @!method initialize(text:, type: :output_text)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText}
-
# for more details.
-
#
-
# A text output from the model.
-
#
-
# @param text [String] The text output from the model.
-
#
-
# @param type [Symbol, :output_text] The type of the output text. Always `output_text`.
-
end
-
-
1
class InputImage < OpenAI::Internal::Type::BaseModel
-
# @!attribute image_url
-
# The URL of the image input.
-
#
-
# @return [String]
-
1
required :image_url, String
-
-
# @!attribute type
-
# The type of the image input. Always `input_image`.
-
#
-
# @return [Symbol, :input_image]
-
1
required :type, const: :input_image
-
-
# @!attribute detail
-
# The detail level of the image to be sent to the model. One of `high`, `low`, or
-
# `auto`. Defaults to `auto`.
-
#
-
# @return [String, nil]
-
1
optional :detail, String
-
-
# @!method initialize(image_url:, detail: nil, type: :input_image)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage}
-
# for more details.
-
#
-
# An image input to the model.
-
#
-
# @param image_url [String] The URL of the image input.
-
#
-
# @param detail [String] The detail level of the image to be sent to the model. One of `high`, `low`, or
-
#
-
# @param type [Symbol, :input_image] The type of the image input. Always `input_image`.
-
end
-
-
# @!method self.variants
-
# @return [Array(String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::OutputText, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem::Content::InputImage, Array<Object>)]
-
-
# @type [OpenAI::Internal::Type::Converter]
-
1
AnArrayOfInputTextAndInputImageArray = OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::Unknown]
-
end
-
-
# The role of the message input. One of `user`, `assistant`, `system`, or
-
# `developer`.
-
#
-
# @see OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem#role
-
1
module Role
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
USER = :user
-
1
ASSISTANT = :assistant
-
1
SYSTEM = :system
-
1
DEVELOPER = :developer
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
# The type of the message input. Always `message`.
-
#
-
# @see OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem#type
-
1
module Type
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
MESSAGE = :message
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::ChatMessage, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template::Template::EvalItem)]
-
end
-
end
-
-
1
class ItemReference < OpenAI::Internal::Type::BaseModel
-
# @!attribute item_reference
-
# A reference to a variable in the `item` namespace. Ie, "item.name"
-
#
-
# @return [String]
-
1
required :item_reference, String
-
-
# @!attribute type
-
# The type of input messages. Always `item_reference`.
-
#
-
# @return [Symbol, :item_reference]
-
1
required :type, const: :item_reference
-
-
# @!method initialize(item_reference:, type: :item_reference)
-
# @param item_reference [String] A reference to a variable in the `item` namespace. Ie, "item.name"
-
#
-
# @param type [Symbol, :item_reference] The type of input messages. Always `item_reference`.
-
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::Template, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::InputMessages::ItemReference)]
-
end
-
-
# @see OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses#sampling_params
-
1
class SamplingParams < OpenAI::Internal::Type::BaseModel
-
# @!attribute max_completion_tokens
-
# The maximum number of tokens in the generated output.
-
#
-
# @return [Integer, nil]
-
1
optional :max_completion_tokens, Integer
-
-
# @!attribute seed
-
# A seed value to initialize the randomness, during sampling.
-
#
-
# @return [Integer, nil]
-
1
optional :seed, Integer
-
-
# @!attribute temperature
-
# A higher temperature increases randomness in the outputs.
-
#
-
# @return [Float, nil]
-
1
optional :temperature, Float
-
-
# @!attribute text
-
# Configuration options for a text response from the model. Can be plain text or
-
# structured JSON data. Learn more:
-
#
-
# - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
-
# - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
-
#
-
# @return [OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams::Text, nil]
-
1
optional :text,
-
-> { OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams::Text }
-
-
# @!attribute tools
-
# An array of tools the model may call while generating a response. You can
-
# specify which tool to use by setting the `tool_choice` parameter.
-
#
-
# The two categories of tools you can provide the model are:
-
#
-
# - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
-
# capabilities, like
-
# [web search](https://platform.openai.com/docs/guides/tools-web-search) or
-
# [file search](https://platform.openai.com/docs/guides/tools-file-search).
-
# Learn more about
-
# [built-in tools](https://platform.openai.com/docs/guides/tools).
-
# - **Function calls (custom tools)**: Functions that are defined by you, enabling
-
# the model to call your own code. Learn more about
-
# [function calling](https://platform.openai.com/docs/guides/function-calling).
-
#
-
# @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>, nil]
-
1
optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] }
-
-
# @!attribute top_p
-
# An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
-
#
-
# @return [Float, nil]
-
1
optional :top_p, Float
-
-
# @!method initialize(max_completion_tokens: nil, seed: nil, temperature: nil, text: nil, tools: nil, top_p: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams}
-
# for more details.
-
#
-
# @param max_completion_tokens [Integer] The maximum number of tokens in the generated output.
-
#
-
# @param seed [Integer] A seed value to initialize the randomness, during sampling.
-
#
-
# @param temperature [Float] A higher temperature increases randomness in the outputs.
-
#
-
# @param text [OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams::Text] Configuration options for a text response from the model. Can be plain
-
#
-
# @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
-
#
-
# @param top_p [Float] An alternative to temperature for nucleus sampling; 1.0 includes all tokens.
-
-
# @see OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams#text
-
1
class Text < OpenAI::Internal::Type::BaseModel
-
# @!attribute format_
-
# An object specifying the format that the model must output.
-
#
-
# Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
-
# ensures the model will match your supplied JSON schema. Learn more in the
-
# [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
-
#
-
# The default format is `{ "type": "text" }` with no additional options.
-
#
-
# **Not recommended for gpt-4o and newer models:**
-
#
-
# Setting to `{ "type": "json_object" }` enables the older JSON mode, which
-
# ensures the message the model generates is valid JSON. Using `json_schema` is
-
# preferred for models that support it.
-
#
-
# @return [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject, nil]
-
1
optional :format_,
-
union: -> {
-
OpenAI::Responses::ResponseFormatTextConfig
-
},
-
api_name: :format
-
-
# @!method initialize(format_: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses::SamplingParams::Text}
-
# for more details.
-
#
-
# Configuration options for a text response from the model. Can be plain text or
-
# structured JSON data. Learn more:
-
#
-
# - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
-
# - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
-
#
-
# @param format_ [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output.
-
end
-
end
-
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Evals::CreateEvalJSONLRunDataSource, OpenAI::Models::Evals::CreateEvalCompletionsRunDataSource, OpenAI::Models::Evals::RunRetrieveResponse::DataSource::Responses)]
-
end
-
-
1
class PerModelUsage < OpenAI::Internal::Type::BaseModel
-
# @!attribute cached_tokens
-
# The number of tokens retrieved from cache.
-
#
-
# @return [Integer]
-
1
required :cached_tokens, Integer
-
-
# @!attribute completion_tokens
-
# The number of completion tokens generated.
-
#
-
# @return [Integer]
-
1
required :completion_tokens, Integer
-
-
# @!attribute invocation_count
-
# The number of invocations.
-
#
-
# @return [Integer]
-
1
required :invocation_count, Integer
-
-
# @!attribute model_name
-
# The name of the model.
-
#
-
# @return [String]
-
1
required :model_name, String
-
-
# @!attribute prompt_tokens
-
# The number of prompt tokens used.
-
#
-
# @return [Integer]
-
1
required :prompt_tokens, Integer
-
-
# @!attribute total_tokens
-
# The total number of tokens used.
-
#
-
# @return [Integer]
-
1
required :total_tokens, Integer
-
-
# @!method initialize(cached_tokens:, completion_tokens:, invocation_count:, model_name:, prompt_tokens:, total_tokens:)
-
# @param cached_tokens [Integer] The number of tokens retrieved from cache.
-
#
-
# @param completion_tokens [Integer] The number of completion tokens generated.
-
#
-
# @param invocation_count [Integer] The number of invocations.
-
#
-
# @param model_name [String] The name of the model.
-
#
-
# @param prompt_tokens [Integer] The number of prompt tokens used.
-
#
-
# @param total_tokens [Integer] The total number of tokens used.
-
end
-
-
1
class PerTestingCriteriaResult < OpenAI::Internal::Type::BaseModel
-
# @!attribute failed
-
# Number of tests failed for this criteria.
-
#
-
# @return [Integer]
-
1
required :failed, Integer
-
-
# @!attribute passed
-
# Number of tests passed for this criteria.
-
#
-
# @return [Integer]
-
1
required :passed, Integer
-
-
# @!attribute testing_criteria
-
# A description of the testing criteria.
-
#
-
# @return [String]
-
1
required :testing_criteria, String
-
-
# @!method initialize(failed:, passed:, testing_criteria:)
-
# @param failed [Integer] Number of tests failed for this criteria.
-
#
-
# @param passed [Integer] Number of tests passed for this criteria.
-
#
-
# @param testing_criteria [String] A description of the testing criteria.
-
end
-
-
# @see OpenAI::Models::Evals::RunRetrieveResponse#result_counts
-
1
class ResultCounts < OpenAI::Internal::Type::BaseModel
-
# @!attribute errored
-
# Number of output items that resulted in an error.
-
#
-
# @return [Integer]
-
1
required :errored, Integer
-
-
# @!attribute failed
-
# Number of output items that failed to pass the evaluation.
-
#
-
# @return [Integer]
-
1
required :failed, Integer
-
-
# @!attribute passed
-
# Number of output items that passed the evaluation.
-
#
-
# @return [Integer]
-
1
required :passed, Integer
-
-
# @!attribute total
-
# Total number of executed output items.
-
#
-
# @return [Integer]
-
1
required :total, Integer
-
-
# @!method initialize(errored:, failed:, passed:, total:)
-
# Counters summarizing the outcomes of the evaluation run.
-
#
-
# @param errored [Integer] Number of output items that resulted in an error.
-
#
-
# @param failed [Integer] Number of output items that failed to pass the evaluation.
-
#
-
# @param passed [Integer] Number of output items that passed the evaluation.
-
#
-
# @param total [Integer] Total number of executed output items.
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Evals
-
1
module Runs
-
# @see OpenAI::Resources::Evals::Runs::OutputItems#list
-
1
class OutputItemListParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!attribute eval_id
-
#
-
# @return [String]
-
1
required :eval_id, String
-
-
# @!attribute after
-
# Identifier for the last output item from the previous pagination request.
-
#
-
# @return [String, nil]
-
1
optional :after, String
-
-
# @!attribute limit
-
# Number of output items to retrieve.
-
#
-
# @return [Integer, nil]
-
1
optional :limit, Integer
-
-
# @!attribute order
-
# Sort order for output items by timestamp. Use `asc` for ascending order or
-
# `desc` for descending order. Defaults to `asc`.
-
#
-
# @return [Symbol, OpenAI::Models::Evals::Runs::OutputItemListParams::Order, nil]
-
1
optional :order, enum: -> { OpenAI::Evals::Runs::OutputItemListParams::Order }
-
-
# @!attribute status
-
# Filter output items by status. Use `failed` to filter by failed output items or
-
# `pass` to filter by passed output items.
-
#
-
# @return [Symbol, OpenAI::Models::Evals::Runs::OutputItemListParams::Status, nil]
-
1
optional :status, enum: -> { OpenAI::Evals::Runs::OutputItemListParams::Status }
-
-
# @!method initialize(eval_id:, after: nil, limit: nil, order: nil, status: nil, request_options: {})
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Evals::Runs::OutputItemListParams} for more details.
-
#
-
# @param eval_id [String]
-
#
-
# @param after [String] Identifier for the last output item from the previous pagination request.
-
#
-
# @param limit [Integer] Number of output items to retrieve.
-
#
-
# @param order [Symbol, OpenAI::Models::Evals::Runs::OutputItemListParams::Order] Sort order for output items by timestamp. Use `asc` for ascending order or `desc
-
#
-
# @param status [Symbol, OpenAI::Models::Evals::Runs::OutputItemListParams::Status] Filter output items by status. Use `failed` to filter by failed output
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
-
# Sort order for output items by timestamp. Use `asc` for ascending order or
-
# `desc` for descending order. Defaults to `asc`.
-
1
module Order
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
ASC = :asc
-
1
DESC = :desc
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
# Filter output items by status. Use `failed` to filter by failed output items or
-
# `pass` to filter by passed output items.
-
1
module Status
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
FAIL = :fail
-
1
PASS = :pass
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Evals
-
1
module Runs
-
# @see OpenAI::Resources::Evals::Runs::OutputItems#list
-
1
class OutputItemListResponse < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# Unique identifier for the evaluation run output item.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute created_at
-
# Unix timestamp (in seconds) when the evaluation run was created.
-
#
-
# @return [Integer]
-
1
required :created_at, Integer
-
-
# @!attribute datasource_item
-
# Details of the input data source item.
-
#
-
# @return [Hash{Symbol=>Object}]
-
1
required :datasource_item, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]
-
-
# @!attribute datasource_item_id
-
# The identifier for the data source item.
-
#
-
# @return [Integer]
-
1
required :datasource_item_id, Integer
-
-
# @!attribute eval_id
-
# The identifier of the evaluation group.
-
#
-
# @return [String]
-
1
required :eval_id, String
-
-
# @!attribute object
-
# The type of the object. Always "eval.run.output_item".
-
#
-
# @return [Symbol, :"eval.run.output_item"]
-
1
required :object, const: :"eval.run.output_item"
-
-
# @!attribute results
-
# A list of results from the evaluation run.
-
#
-
# @return [Array<Hash{Symbol=>Object}>]
-
1
required :results,
-
OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]]
-
-
# @!attribute run_id
-
# The identifier of the evaluation run associated with this output item.
-
#
-
# @return [String]
-
1
required :run_id, String
-
-
# @!attribute sample
-
# A sample containing the input and output of the evaluation run.
-
#
-
# @return [OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample]
-
1
required :sample, -> { OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample }
-
-
# @!attribute status
-
# The status of the evaluation run.
-
#
-
# @return [String]
-
1
required :status, String
-
-
# @!method initialize(id:, created_at:, datasource_item:, datasource_item_id:, eval_id:, results:, run_id:, sample:, status:, object: :"eval.run.output_item")
-
# A schema representing an evaluation run output item.
-
#
-
# @param id [String] Unique identifier for the evaluation run output item.
-
#
-
# @param created_at [Integer] Unix timestamp (in seconds) when the evaluation run was created.
-
#
-
# @param datasource_item [Hash{Symbol=>Object}] Details of the input data source item.
-
#
-
# @param datasource_item_id [Integer] The identifier for the data source item.
-
#
-
# @param eval_id [String] The identifier of the evaluation group.
-
#
-
# @param results [Array<Hash{Symbol=>Object}>] A list of results from the evaluation run.
-
#
-
# @param run_id [String] The identifier of the evaluation run associated with this output item.
-
#
-
# @param sample [OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample] A sample containing the input and output of the evaluation run.
-
#
-
# @param status [String] The status of the evaluation run.
-
#
-
# @param object [Symbol, :"eval.run.output_item"] The type of the object. Always "eval.run.output_item".
-
-
# @see OpenAI::Models::Evals::Runs::OutputItemListResponse#sample
-
1
class Sample < OpenAI::Internal::Type::BaseModel
-
# @!attribute error
-
# An object representing an error response from the Eval API.
-
#
-
# @return [OpenAI::Models::Evals::EvalAPIError]
-
1
required :error, -> { OpenAI::Evals::EvalAPIError }
-
-
# @!attribute finish_reason
-
# The reason why the sample generation was finished.
-
#
-
# @return [String]
-
1
required :finish_reason, String
-
-
# @!attribute input
-
# An array of input messages.
-
#
-
# @return [Array<OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Input>]
-
1
required :input,
-
-> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Input] }
-
-
# @!attribute max_completion_tokens
-
# The maximum number of tokens allowed for completion.
-
#
-
# @return [Integer]
-
1
required :max_completion_tokens, Integer
-
-
# @!attribute model
-
# The model used for generating the sample.
-
#
-
# @return [String]
-
1
required :model, String
-
-
# @!attribute output
-
# An array of output messages.
-
#
-
# @return [Array<OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Output>]
-
1
required :output,
-
-> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Output] }
-
-
# @!attribute seed
-
# The seed used for generating the sample.
-
#
-
# @return [Integer]
-
1
required :seed, Integer
-
-
# @!attribute temperature
-
# The sampling temperature used.
-
#
-
# @return [Float]
-
1
required :temperature, Float
-
-
# @!attribute top_p
-
# The top_p value used for sampling.
-
#
-
# @return [Float]
-
1
required :top_p, Float
-
-
# @!attribute usage
-
# Token usage details for the sample.
-
#
-
# @return [OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Usage]
-
1
required :usage, -> { OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Usage }
-
-
# @!method initialize(error:, finish_reason:, input:, max_completion_tokens:, model:, output:, seed:, temperature:, top_p:, usage:)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample} for more details.
-
#
-
# A sample containing the input and output of the evaluation run.
-
#
-
# @param error [OpenAI::Models::Evals::EvalAPIError] An object representing an error response from the Eval API.
-
#
-
# @param finish_reason [String] The reason why the sample generation was finished.
-
#
-
# @param input [Array<OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Input>] An array of input messages.
-
#
-
# @param max_completion_tokens [Integer] The maximum number of tokens allowed for completion.
-
#
-
# @param model [String] The model used for generating the sample.
-
#
-
# @param output [Array<OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Output>] An array of output messages.
-
#
-
# @param seed [Integer] The seed used for generating the sample.
-
#
-
# @param temperature [Float] The sampling temperature used.
-
#
-
# @param top_p [Float] The top_p value used for sampling.
-
#
-
# @param usage [OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample::Usage] Token usage details for the sample.
-
-
1
class Input < OpenAI::Internal::Type::BaseModel
-
# @!attribute content
-
# The content of the message.
-
#
-
# @return [String]
-
1
required :content, String
-
-
# @!attribute role
-
# The role of the message sender (e.g., system, user, developer).
-
#
-
# @return [String]
-
1
required :role, String
-
-
# @!method initialize(content:, role:)
-
# An input message.
-
#
-
# @param content [String] The content of the message.
-
#
-
# @param role [String] The role of the message sender (e.g., system, user, developer).
-
end
-
-
1
class Output < OpenAI::Internal::Type::BaseModel
-
# @!attribute content
-
# The content of the message.
-
#
-
# @return [String, nil]
-
1
optional :content, String
-
-
# @!attribute role
-
# The role of the message (e.g. "system", "assistant", "user").
-
#
-
# @return [String, nil]
-
1
optional :role, String
-
-
# @!method initialize(content: nil, role: nil)
-
# @param content [String] The content of the message.
-
#
-
# @param role [String] The role of the message (e.g. "system", "assistant", "user").
-
end
-
-
# @see OpenAI::Models::Evals::Runs::OutputItemListResponse::Sample#usage
-
1
class Usage < OpenAI::Internal::Type::BaseModel
-
# @!attribute cached_tokens
-
# The number of tokens retrieved from cache.
-
#
-
# @return [Integer]
-
1
required :cached_tokens, Integer
-
-
# @!attribute completion_tokens
-
# The number of completion tokens generated.
-
#
-
# @return [Integer]
-
1
required :completion_tokens, Integer
-
-
# @!attribute prompt_tokens
-
# The number of prompt tokens used.
-
#
-
# @return [Integer]
-
1
required :prompt_tokens, Integer
-
-
# @!attribute total_tokens
-
# The total number of tokens used.
-
#
-
# @return [Integer]
-
1
required :total_tokens, Integer
-
-
# @!method initialize(cached_tokens:, completion_tokens:, prompt_tokens:, total_tokens:)
-
# Token usage details for the sample.
-
#
-
# @param cached_tokens [Integer] The number of tokens retrieved from cache.
-
#
-
# @param completion_tokens [Integer] The number of completion tokens generated.
-
#
-
# @param prompt_tokens [Integer] The number of prompt tokens used.
-
#
-
# @param total_tokens [Integer] The total number of tokens used.
-
end
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Evals
-
1
module Runs
-
# @see OpenAI::Resources::Evals::Runs::OutputItems#retrieve
-
1
class OutputItemRetrieveParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!attribute eval_id
-
#
-
# @return [String]
-
1
required :eval_id, String
-
-
# @!attribute run_id
-
#
-
# @return [String]
-
1
required :run_id, String
-
-
# @!method initialize(eval_id:, run_id:, request_options: {})
-
# @param eval_id [String]
-
# @param run_id [String]
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Evals
-
1
module Runs
-
# @see OpenAI::Resources::Evals::Runs::OutputItems#retrieve
-
1
class OutputItemRetrieveResponse < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# Unique identifier for the evaluation run output item.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute created_at
-
# Unix timestamp (in seconds) when the evaluation run was created.
-
#
-
# @return [Integer]
-
1
required :created_at, Integer
-
-
# @!attribute datasource_item
-
# Details of the input data source item.
-
#
-
# @return [Hash{Symbol=>Object}]
-
1
required :datasource_item, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]
-
-
# @!attribute datasource_item_id
-
# The identifier for the data source item.
-
#
-
# @return [Integer]
-
1
required :datasource_item_id, Integer
-
-
# @!attribute eval_id
-
# The identifier of the evaluation group.
-
#
-
# @return [String]
-
1
required :eval_id, String
-
-
# @!attribute object
-
# The type of the object. Always "eval.run.output_item".
-
#
-
# @return [Symbol, :"eval.run.output_item"]
-
1
required :object, const: :"eval.run.output_item"
-
-
# @!attribute results
-
# A list of results from the evaluation run.
-
#
-
# @return [Array<Hash{Symbol=>Object}>]
-
1
required :results,
-
OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]]
-
-
# @!attribute run_id
-
# The identifier of the evaluation run associated with this output item.
-
#
-
# @return [String]
-
1
required :run_id, String
-
-
# @!attribute sample
-
# A sample containing the input and output of the evaluation run.
-
#
-
# @return [OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample]
-
1
required :sample, -> { OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample }
-
-
# @!attribute status
-
# The status of the evaluation run.
-
#
-
# @return [String]
-
1
required :status, String
-
-
# @!method initialize(id:, created_at:, datasource_item:, datasource_item_id:, eval_id:, results:, run_id:, sample:, status:, object: :"eval.run.output_item")
-
# A schema representing an evaluation run output item.
-
#
-
# @param id [String] Unique identifier for the evaluation run output item.
-
#
-
# @param created_at [Integer] Unix timestamp (in seconds) when the evaluation run was created.
-
#
-
# @param datasource_item [Hash{Symbol=>Object}] Details of the input data source item.
-
#
-
# @param datasource_item_id [Integer] The identifier for the data source item.
-
#
-
# @param eval_id [String] The identifier of the evaluation group.
-
#
-
# @param results [Array<Hash{Symbol=>Object}>] A list of results from the evaluation run.
-
#
-
# @param run_id [String] The identifier of the evaluation run associated with this output item.
-
#
-
# @param sample [OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample] A sample containing the input and output of the evaluation run.
-
#
-
# @param status [String] The status of the evaluation run.
-
#
-
# @param object [Symbol, :"eval.run.output_item"] The type of the object. Always "eval.run.output_item".
-
-
# @see OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse#sample
-
1
class Sample < OpenAI::Internal::Type::BaseModel
-
# @!attribute error
-
# An object representing an error response from the Eval API.
-
#
-
# @return [OpenAI::Models::Evals::EvalAPIError]
-
1
required :error, -> { OpenAI::Evals::EvalAPIError }
-
-
# @!attribute finish_reason
-
# The reason why the sample generation was finished.
-
#
-
# @return [String]
-
1
required :finish_reason, String
-
-
# @!attribute input
-
# An array of input messages.
-
#
-
# @return [Array<OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Input>]
-
1
required :input,
-
-> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Input] }
-
-
# @!attribute max_completion_tokens
-
# The maximum number of tokens allowed for completion.
-
#
-
# @return [Integer]
-
1
required :max_completion_tokens, Integer
-
-
# @!attribute model
-
# The model used for generating the sample.
-
#
-
# @return [String]
-
1
required :model, String
-
-
# @!attribute output
-
# An array of output messages.
-
#
-
# @return [Array<OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Output>]
-
1
required :output,
-
-> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Output] }
-
-
# @!attribute seed
-
# The seed used for generating the sample.
-
#
-
# @return [Integer]
-
1
required :seed, Integer
-
-
# @!attribute temperature
-
# The sampling temperature used.
-
#
-
# @return [Float]
-
1
required :temperature, Float
-
-
# @!attribute top_p
-
# The top_p value used for sampling.
-
#
-
# @return [Float]
-
1
required :top_p, Float
-
-
# @!attribute usage
-
# Token usage details for the sample.
-
#
-
# @return [OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Usage]
-
1
required :usage, -> { OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Usage }
-
-
# @!method initialize(error:, finish_reason:, input:, max_completion_tokens:, model:, output:, seed:, temperature:, top_p:, usage:)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample} for more
-
# details.
-
#
-
# A sample containing the input and output of the evaluation run.
-
#
-
# @param error [OpenAI::Models::Evals::EvalAPIError] An object representing an error response from the Eval API.
-
#
-
# @param finish_reason [String] The reason why the sample generation was finished.
-
#
-
# @param input [Array<OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Input>] An array of input messages.
-
#
-
# @param max_completion_tokens [Integer] The maximum number of tokens allowed for completion.
-
#
-
# @param model [String] The model used for generating the sample.
-
#
-
# @param output [Array<OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Output>] An array of output messages.
-
#
-
# @param seed [Integer] The seed used for generating the sample.
-
#
-
# @param temperature [Float] The sampling temperature used.
-
#
-
# @param top_p [Float] The top_p value used for sampling.
-
#
-
# @param usage [OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample::Usage] Token usage details for the sample.
-
-
1
class Input < OpenAI::Internal::Type::BaseModel
-
# @!attribute content
-
# The content of the message.
-
#
-
# @return [String]
-
1
required :content, String
-
-
# @!attribute role
-
# The role of the message sender (e.g., system, user, developer).
-
#
-
# @return [String]
-
1
required :role, String
-
-
# @!method initialize(content:, role:)
-
# An input message.
-
#
-
# @param content [String] The content of the message.
-
#
-
# @param role [String] The role of the message sender (e.g., system, user, developer).
-
end
-
-
1
class Output < OpenAI::Internal::Type::BaseModel
-
# @!attribute content
-
# The content of the message.
-
#
-
# @return [String, nil]
-
1
optional :content, String
-
-
# @!attribute role
-
# The role of the message (e.g. "system", "assistant", "user").
-
#
-
# @return [String, nil]
-
1
optional :role, String
-
-
# @!method initialize(content: nil, role: nil)
-
# @param content [String] The content of the message.
-
#
-
# @param role [String] The role of the message (e.g. "system", "assistant", "user").
-
end
-
-
# @see OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse::Sample#usage
-
1
class Usage < OpenAI::Internal::Type::BaseModel
-
# @!attribute cached_tokens
-
# The number of tokens retrieved from cache.
-
#
-
# @return [Integer]
-
1
required :cached_tokens, Integer
-
-
# @!attribute completion_tokens
-
# The number of completion tokens generated.
-
#
-
# @return [Integer]
-
1
required :completion_tokens, Integer
-
-
# @!attribute prompt_tokens
-
# The number of prompt tokens used.
-
#
-
# @return [Integer]
-
1
required :prompt_tokens, Integer
-
-
# @!attribute total_tokens
-
# The total number of tokens used.
-
#
-
# @return [Integer]
-
1
required :total_tokens, Integer
-
-
# @!method initialize(cached_tokens:, completion_tokens:, prompt_tokens:, total_tokens:)
-
# Token usage details for the sample.
-
#
-
# @param cached_tokens [Integer] The number of tokens retrieved from cache.
-
#
-
# @param completion_tokens [Integer] The number of completion tokens generated.
-
#
-
# @param prompt_tokens [Integer] The number of prompt tokens used.
-
#
-
# @param total_tokens [Integer] The total number of tokens used.
-
end
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# The strategy used to chunk the file.
-
1
module FileChunkingStrategy
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
1
variant :static, -> { OpenAI::StaticFileChunkingStrategyObject }
-
-
# This is returned when the chunking strategy is unknown. Typically, this is because the file was indexed before the `chunking_strategy` concept was introduced in the API.
-
1
variant :other, -> { OpenAI::OtherFileChunkingStrategyObject }
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::StaticFileChunkingStrategyObject, OpenAI::Models::OtherFileChunkingStrategyObject)]
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# The chunking strategy used to chunk the file(s). If not set, will use the `auto`
-
# strategy. Only applicable if `file_ids` is non-empty.
-
1
module FileChunkingStrategyParam
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
# The default strategy. This strategy currently uses a `max_chunk_size_tokens` of `800` and `chunk_overlap_tokens` of `400`.
-
1
variant :auto, -> { OpenAI::AutoFileChunkingStrategyParam }
-
-
# Customize your own chunking strategy by setting chunk size and chunk overlap.
-
1
variant :static, -> { OpenAI::StaticFileChunkingStrategyObjectParam }
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::AutoFileChunkingStrategyParam, OpenAI::Models::StaticFileChunkingStrategyObjectParam)]
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
FileContent = String
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# @see OpenAI::Resources::Files#content
-
1
class FileContentParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!method initialize(request_options: {})
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# @see OpenAI::Resources::Files#create
-
1
class FileCreateParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!attribute file
-
# The File object (not file name) to be uploaded.
-
#
-
# @return [Pathname, StringIO, IO, String, OpenAI::FilePart]
-
1
required :file, OpenAI::Internal::Type::FileInput
-
-
# @!attribute purpose
-
# The intended purpose of the uploaded file. One of: - `assistants`: Used in the
-
# Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for
-
# fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`:
-
# Flexible file type for any purpose - `evals`: Used for eval data sets
-
#
-
# @return [Symbol, OpenAI::Models::FilePurpose]
-
1
required :purpose, enum: -> { OpenAI::FilePurpose }
-
-
# @!method initialize(file:, purpose:, request_options: {})
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::FileCreateParams} for more details.
-
#
-
# @param file [Pathname, StringIO, IO, String, OpenAI::FilePart] The File object (not file name) to be uploaded.
-
#
-
# @param purpose [Symbol, OpenAI::Models::FilePurpose] The intended purpose of the uploaded file. One of: - `assistants`: Used in the A
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# @see OpenAI::Resources::Files#delete
-
1
class FileDeleteParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!method initialize(request_options: {})
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# @see OpenAI::Resources::Files#delete
-
1
class FileDeleted < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute deleted
-
#
-
# @return [Boolean]
-
1
required :deleted, OpenAI::Internal::Type::Boolean
-
-
# @!attribute object
-
#
-
# @return [Symbol, :file]
-
1
required :object, const: :file
-
-
# @!method initialize(id:, deleted:, object: :file)
-
# @param id [String]
-
# @param deleted [Boolean]
-
# @param object [Symbol, :file]
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# @see OpenAI::Resources::Files#list
-
1
class FileListParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!attribute after
-
# A cursor for use in pagination. `after` is an object ID that defines your place
-
# in the list. For instance, if you make a list request and receive 100 objects,
-
# ending with obj_foo, your subsequent call can include after=obj_foo in order to
-
# fetch the next page of the list.
-
#
-
# @return [String, nil]
-
1
optional :after, String
-
-
# @!attribute limit
-
# A limit on the number of objects to be returned. Limit can range between 1 and
-
# 10,000, and the default is 10,000.
-
#
-
# @return [Integer, nil]
-
1
optional :limit, Integer
-
-
# @!attribute order
-
# Sort order by the `created_at` timestamp of the objects. `asc` for ascending
-
# order and `desc` for descending order.
-
#
-
# @return [Symbol, OpenAI::Models::FileListParams::Order, nil]
-
1
optional :order, enum: -> { OpenAI::FileListParams::Order }
-
-
# @!attribute purpose
-
# Only return files with the given purpose.
-
#
-
# @return [String, nil]
-
1
optional :purpose, String
-
-
# @!method initialize(after: nil, limit: nil, order: nil, purpose: nil, request_options: {})
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::FileListParams} for more details.
-
#
-
# @param after [String] A cursor for use in pagination. `after` is an object ID that defines your place
-
#
-
# @param limit [Integer] A limit on the number of objects to be returned. Limit can range between 1 and 1
-
#
-
# @param order [Symbol, OpenAI::Models::FileListParams::Order] Sort order by the `created_at` timestamp of the objects. `asc` for ascending ord
-
#
-
# @param purpose [String] Only return files with the given purpose.
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
-
# Sort order by the `created_at` timestamp of the objects. `asc` for ascending
-
# order and `desc` for descending order.
-
1
module Order
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
ASC = :asc
-
1
DESC = :desc
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# @see OpenAI::Resources::Files#create
-
1
class FileObject < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The file identifier, which can be referenced in the API endpoints.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute bytes
-
# The size of the file, in bytes.
-
#
-
# @return [Integer]
-
1
required :bytes, Integer
-
-
# @!attribute created_at
-
# The Unix timestamp (in seconds) for when the file was created.
-
#
-
# @return [Integer]
-
1
required :created_at, Integer
-
-
# @!attribute filename
-
# The name of the file.
-
#
-
# @return [String]
-
1
required :filename, String
-
-
# @!attribute object
-
# The object type, which is always `file`.
-
#
-
# @return [Symbol, :file]
-
1
required :object, const: :file
-
-
# @!attribute purpose
-
# The intended purpose of the file. Supported values are `assistants`,
-
# `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results`,
-
# `vision`, and `user_data`.
-
#
-
# @return [Symbol, OpenAI::Models::FileObject::Purpose]
-
1
required :purpose, enum: -> { OpenAI::FileObject::Purpose }
-
-
# @!attribute status
-
# @deprecated
-
#
-
# Deprecated. The current status of the file, which can be either `uploaded`,
-
# `processed`, or `error`.
-
#
-
# @return [Symbol, OpenAI::Models::FileObject::Status]
-
1
required :status, enum: -> { OpenAI::FileObject::Status }
-
-
# @!attribute expires_at
-
# The Unix timestamp (in seconds) for when the file will expire.
-
#
-
# @return [Integer, nil]
-
1
optional :expires_at, Integer
-
-
# @!attribute status_details
-
# @deprecated
-
#
-
# Deprecated. For details on why a fine-tuning training file failed validation,
-
# see the `error` field on `fine_tuning.job`.
-
#
-
# @return [String, nil]
-
1
optional :status_details, String
-
-
# @!method initialize(id:, bytes:, created_at:, filename:, purpose:, status:, expires_at: nil, status_details: nil, object: :file)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::FileObject} for more details.
-
#
-
# The `File` object represents a document that has been uploaded to OpenAI.
-
#
-
# @param id [String] The file identifier, which can be referenced in the API endpoints.
-
#
-
# @param bytes [Integer] The size of the file, in bytes.
-
#
-
# @param created_at [Integer] The Unix timestamp (in seconds) for when the file was created.
-
#
-
# @param filename [String] The name of the file.
-
#
-
# @param purpose [Symbol, OpenAI::Models::FileObject::Purpose] The intended purpose of the file. Supported values are `assistants`, `assistants
-
#
-
# @param status [Symbol, OpenAI::Models::FileObject::Status] Deprecated. The current status of the file, which can be either `uploaded`, `pro
-
#
-
# @param expires_at [Integer] The Unix timestamp (in seconds) for when the file will expire.
-
#
-
# @param status_details [String] Deprecated. For details on why a fine-tuning training file failed validation, se
-
#
-
# @param object [Symbol, :file] The object type, which is always `file`.
-
-
# The intended purpose of the file. Supported values are `assistants`,
-
# `assistants_output`, `batch`, `batch_output`, `fine-tune`, `fine-tune-results`,
-
# `vision`, and `user_data`.
-
#
-
# @see OpenAI::Models::FileObject#purpose
-
1
module Purpose
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
ASSISTANTS = :assistants
-
1
ASSISTANTS_OUTPUT = :assistants_output
-
1
BATCH = :batch
-
1
BATCH_OUTPUT = :batch_output
-
1
FINE_TUNE = :"fine-tune"
-
1
FINE_TUNE_RESULTS = :"fine-tune-results"
-
1
VISION = :vision
-
1
USER_DATA = :user_data
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
# @deprecated
-
#
-
# Deprecated. The current status of the file, which can be either `uploaded`,
-
# `processed`, or `error`.
-
#
-
# @see OpenAI::Models::FileObject#status
-
1
module Status
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
UPLOADED = :uploaded
-
1
PROCESSED = :processed
-
1
ERROR = :error
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# The intended purpose of the uploaded file. One of: - `assistants`: Used in the
-
# Assistants API - `batch`: Used in the Batch API - `fine-tune`: Used for
-
# fine-tuning - `vision`: Images used for vision fine-tuning - `user_data`:
-
# Flexible file type for any purpose - `evals`: Used for eval data sets
-
1
module FilePurpose
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
ASSISTANTS = :assistants
-
1
BATCH = :batch
-
1
FINE_TUNE = :"fine-tune"
-
1
VISION = :vision
-
1
USER_DATA = :user_data
-
1
EVALS = :evals
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# @see OpenAI::Resources::Files#retrieve
-
1
class FileRetrieveParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!method initialize(request_options: {})
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module FineTuning
-
1
module Alpha
-
# @see OpenAI::Resources::FineTuning::Alpha::Graders#run
-
1
class GraderRunParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!attribute grader
-
# The grader used for the fine-tuning job.
-
#
-
# @return [OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::MultiGrader]
-
1
required :grader, union: -> { OpenAI::FineTuning::Alpha::GraderRunParams::Grader }
-
-
# @!attribute model_sample
-
# The model sample to be evaluated. This value will be used to populate the
-
# `sample` namespace. See
-
# [the guide](https://platform.openai.com/docs/guides/graders) for more details.
-
# The `output_json` variable will be populated if the model sample is a valid JSON
-
# string.
-
#
-
# @return [String]
-
1
required :model_sample, String
-
-
# @!attribute item
-
# The dataset item provided to the grader. This will be used to populate the
-
# `item` namespace. See
-
# [the guide](https://platform.openai.com/docs/guides/graders) for more details.
-
#
-
# @return [Object, nil]
-
1
optional :item, OpenAI::Internal::Type::Unknown
-
-
# @!method initialize(grader:, model_sample:, item: nil, request_options: {})
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::FineTuning::Alpha::GraderRunParams} for more details.
-
#
-
# @param grader [OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::MultiGrader] The grader used for the fine-tuning job.
-
#
-
# @param model_sample [String] The model sample to be evaluated. This value will be used to populate
-
#
-
# @param item [Object] The dataset item provided to the grader. This will be used to populate
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
-
# The grader used for the fine-tuning job.
-
1
module Grader
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
# A StringCheckGrader object that performs a string comparison between input and reference using a specified operation.
-
1
variant :string_check, -> { OpenAI::Graders::StringCheckGrader }
-
-
# A TextSimilarityGrader object which grades text based on similarity metrics.
-
1
variant :text_similarity, -> { OpenAI::Graders::TextSimilarityGrader }
-
-
# A PythonGrader object that runs a python script on the input.
-
1
variant :python, -> { OpenAI::Graders::PythonGrader }
-
-
# A ScoreModelGrader object that uses a model to assign a score to the input.
-
1
variant :score_model, -> { OpenAI::Graders::ScoreModelGrader }
-
-
# A MultiGrader object combines the output of multiple graders to produce a single score.
-
1
variant :multi, -> { OpenAI::Graders::MultiGrader }
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::MultiGrader)]
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module FineTuning
-
1
module Alpha
-
# @see OpenAI::Resources::FineTuning::Alpha::Graders#run
-
1
class GraderRunResponse < OpenAI::Internal::Type::BaseModel
-
# @!attribute metadata
-
#
-
# @return [OpenAI::Models::FineTuning::Alpha::GraderRunResponse::Metadata]
-
1
required :metadata, -> { OpenAI::Models::FineTuning::Alpha::GraderRunResponse::Metadata }
-
-
# @!attribute model_grader_token_usage_per_model
-
#
-
# @return [Hash{Symbol=>Object}]
-
1
required :model_grader_token_usage_per_model,
-
OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]
-
-
# @!attribute reward
-
#
-
# @return [Float]
-
1
required :reward, Float
-
-
# @!attribute sub_rewards
-
#
-
# @return [Hash{Symbol=>Object}]
-
1
required :sub_rewards, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]
-
-
# @!method initialize(metadata:, model_grader_token_usage_per_model:, reward:, sub_rewards:)
-
# @param metadata [OpenAI::Models::FineTuning::Alpha::GraderRunResponse::Metadata]
-
# @param model_grader_token_usage_per_model [Hash{Symbol=>Object}]
-
# @param reward [Float]
-
# @param sub_rewards [Hash{Symbol=>Object}]
-
-
# @see OpenAI::Models::FineTuning::Alpha::GraderRunResponse#metadata
-
1
class Metadata < OpenAI::Internal::Type::BaseModel
-
# @!attribute errors
-
#
-
# @return [OpenAI::Models::FineTuning::Alpha::GraderRunResponse::Metadata::Errors]
-
1
required :errors, -> { OpenAI::Models::FineTuning::Alpha::GraderRunResponse::Metadata::Errors }
-
-
# @!attribute execution_time
-
#
-
# @return [Float]
-
1
required :execution_time, Float
-
-
# @!attribute name
-
#
-
# @return [String]
-
1
required :name, String
-
-
# @!attribute sampled_model_name
-
#
-
# @return [String, nil]
-
1
required :sampled_model_name, String, nil?: true
-
-
# @!attribute scores
-
#
-
# @return [Hash{Symbol=>Object}]
-
1
required :scores, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]
-
-
# @!attribute token_usage
-
#
-
# @return [Integer, nil]
-
1
required :token_usage, Integer, nil?: true
-
-
# @!attribute type
-
#
-
# @return [String]
-
1
required :type, String
-
-
# @!method initialize(errors:, execution_time:, name:, sampled_model_name:, scores:, token_usage:, type:)
-
# @param errors [OpenAI::Models::FineTuning::Alpha::GraderRunResponse::Metadata::Errors]
-
# @param execution_time [Float]
-
# @param name [String]
-
# @param sampled_model_name [String, nil]
-
# @param scores [Hash{Symbol=>Object}]
-
# @param token_usage [Integer, nil]
-
# @param type [String]
-
-
# @see OpenAI::Models::FineTuning::Alpha::GraderRunResponse::Metadata#errors
-
1
class Errors < OpenAI::Internal::Type::BaseModel
-
# @!attribute formula_parse_error
-
#
-
# @return [Boolean]
-
1
required :formula_parse_error, OpenAI::Internal::Type::Boolean
-
-
# @!attribute invalid_variable_error
-
#
-
# @return [Boolean]
-
1
required :invalid_variable_error, OpenAI::Internal::Type::Boolean
-
-
# @!attribute model_grader_parse_error
-
#
-
# @return [Boolean]
-
1
required :model_grader_parse_error, OpenAI::Internal::Type::Boolean
-
-
# @!attribute model_grader_refusal_error
-
#
-
# @return [Boolean]
-
1
required :model_grader_refusal_error, OpenAI::Internal::Type::Boolean
-
-
# @!attribute model_grader_server_error
-
#
-
# @return [Boolean]
-
1
required :model_grader_server_error, OpenAI::Internal::Type::Boolean
-
-
# @!attribute model_grader_server_error_details
-
#
-
# @return [String, nil]
-
1
required :model_grader_server_error_details, String, nil?: true
-
-
# @!attribute other_error
-
#
-
# @return [Boolean]
-
1
required :other_error, OpenAI::Internal::Type::Boolean
-
-
# @!attribute python_grader_runtime_error
-
#
-
# @return [Boolean]
-
1
required :python_grader_runtime_error, OpenAI::Internal::Type::Boolean
-
-
# @!attribute python_grader_runtime_error_details
-
#
-
# @return [String, nil]
-
1
required :python_grader_runtime_error_details, String, nil?: true
-
-
# @!attribute python_grader_server_error
-
#
-
# @return [Boolean]
-
1
required :python_grader_server_error, OpenAI::Internal::Type::Boolean
-
-
# @!attribute python_grader_server_error_type
-
#
-
# @return [String, nil]
-
1
required :python_grader_server_error_type, String, nil?: true
-
-
# @!attribute sample_parse_error
-
#
-
# @return [Boolean]
-
1
required :sample_parse_error, OpenAI::Internal::Type::Boolean
-
-
# @!attribute truncated_observation_error
-
#
-
# @return [Boolean]
-
1
required :truncated_observation_error, OpenAI::Internal::Type::Boolean
-
-
# @!attribute unresponsive_reward_error
-
#
-
# @return [Boolean]
-
1
required :unresponsive_reward_error, OpenAI::Internal::Type::Boolean
-
-
# @!method initialize(formula_parse_error:, invalid_variable_error:, model_grader_parse_error:, model_grader_refusal_error:, model_grader_server_error:, model_grader_server_error_details:, other_error:, python_grader_runtime_error:, python_grader_runtime_error_details:, python_grader_server_error:, python_grader_server_error_type:, sample_parse_error:, truncated_observation_error:, unresponsive_reward_error:)
-
# @param formula_parse_error [Boolean]
-
# @param invalid_variable_error [Boolean]
-
# @param model_grader_parse_error [Boolean]
-
# @param model_grader_refusal_error [Boolean]
-
# @param model_grader_server_error [Boolean]
-
# @param model_grader_server_error_details [String, nil]
-
# @param other_error [Boolean]
-
# @param python_grader_runtime_error [Boolean]
-
# @param python_grader_runtime_error_details [String, nil]
-
# @param python_grader_server_error [Boolean]
-
# @param python_grader_server_error_type [String, nil]
-
# @param sample_parse_error [Boolean]
-
# @param truncated_observation_error [Boolean]
-
# @param unresponsive_reward_error [Boolean]
-
end
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module FineTuning
-
1
module Alpha
-
# @see OpenAI::Resources::FineTuning::Alpha::Graders#validate
-
1
class GraderValidateParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!attribute grader
-
# The grader used for the fine-tuning job.
-
#
-
# @return [OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::MultiGrader]
-
1
required :grader, union: -> { OpenAI::FineTuning::Alpha::GraderValidateParams::Grader }
-
-
# @!method initialize(grader:, request_options: {})
-
# @param grader [OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::MultiGrader] The grader used for the fine-tuning job.
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
-
# The grader used for the fine-tuning job.
-
1
module Grader
-
1
extend OpenAI::Internal::Type::Union
-
-
# A StringCheckGrader object that performs a string comparison between input and reference using a specified operation.
-
1
variant -> { OpenAI::Graders::StringCheckGrader }
-
-
# A TextSimilarityGrader object which grades text based on similarity metrics.
-
1
variant -> { OpenAI::Graders::TextSimilarityGrader }
-
-
# A PythonGrader object that runs a python script on the input.
-
1
variant -> { OpenAI::Graders::PythonGrader }
-
-
# A ScoreModelGrader object that uses a model to assign a score to the input.
-
1
variant -> { OpenAI::Graders::ScoreModelGrader }
-
-
# A MultiGrader object combines the output of multiple graders to produce a single score.
-
1
variant -> { OpenAI::Graders::MultiGrader }
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::MultiGrader)]
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module FineTuning
      module Alpha
        # @see OpenAI::Resources::FineTuning::Alpha::Graders#validate
        class GraderValidateResponse < OpenAI::Internal::Type::BaseModel
          # @!attribute grader
          #   The grader used for the fine-tuning job.
          #
          #   @return [OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::MultiGrader, nil]
          optional :grader, union: -> { OpenAI::Models::FineTuning::Alpha::GraderValidateResponse::Grader }

          # @!method initialize(grader: nil)
          #   @param grader [OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::MultiGrader] The grader used for the fine-tuning job.

          # The grader used for the fine-tuning job.
          #
          # @see OpenAI::Models::FineTuning::Alpha::GraderValidateResponse#grader
          module Grader
            extend OpenAI::Internal::Type::Union

            # A StringCheckGrader object that performs a string comparison between input and reference using a specified operation.
            variant -> { OpenAI::Graders::StringCheckGrader }

            # A TextSimilarityGrader object which grades text based on similarity metrics.
            variant -> { OpenAI::Graders::TextSimilarityGrader }

            # A PythonGrader object that runs a python script on the input.
            variant -> { OpenAI::Graders::PythonGrader }

            # A ScoreModelGrader object that uses a model to assign a score to the input.
            variant -> { OpenAI::Graders::ScoreModelGrader }

            # A MultiGrader object combines the output of multiple graders to produce a single score.
            variant -> { OpenAI::Graders::MultiGrader }

            # @!method self.variants
            #   @return [Array(OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::MultiGrader)]
          end
        end
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module FineTuning
      module Checkpoints
        # @see OpenAI::Resources::FineTuning::Checkpoints::Permissions#create
        class PermissionCreateParams < OpenAI::Internal::Type::BaseModel
          extend OpenAI::Internal::Type::RequestParameters::Converter
          include OpenAI::Internal::Type::RequestParameters

          # @!attribute project_ids
          #   The project identifiers to grant access to.
          #
          #   @return [Array<String>]
          required :project_ids, OpenAI::Internal::Type::ArrayOf[String]

          # @!method initialize(project_ids:, request_options: {})
          #   @param project_ids [Array<String>] The project identifiers to grant access to.
          #
          #   @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
        end
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module FineTuning
      module Checkpoints
        # @see OpenAI::Resources::FineTuning::Checkpoints::Permissions#create
        class PermissionCreateResponse < OpenAI::Internal::Type::BaseModel
          # @!attribute id
          #   The permission identifier, which can be referenced in the API endpoints.
          #
          #   @return [String]
          required :id, String

          # @!attribute created_at
          #   The Unix timestamp (in seconds) for when the permission was created.
          #
          #   @return [Integer]
          required :created_at, Integer

          # @!attribute object
          #   The object type, which is always "checkpoint.permission".
          #
          #   @return [Symbol, :"checkpoint.permission"]
          required :object, const: :"checkpoint.permission"

          # @!attribute project_id
          #   The project identifier that the permission is for.
          #
          #   @return [String]
          required :project_id, String

          # @!method initialize(id:, created_at:, project_id:, object: :"checkpoint.permission")
          #   The `checkpoint.permission` object represents a permission for a fine-tuned
          #   model checkpoint.
          #
          #   @param id [String] The permission identifier, which can be referenced in the API endpoints.
          #
          #   @param created_at [Integer] The Unix timestamp (in seconds) for when the permission was created.
          #
          #   @param project_id [String] The project identifier that the permission is for.
          #
          #   @param object [Symbol, :"checkpoint.permission"] The object type, which is always "checkpoint.permission".
        end
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module FineTuning
      module Checkpoints
        # @see OpenAI::Resources::FineTuning::Checkpoints::Permissions#delete
        class PermissionDeleteParams < OpenAI::Internal::Type::BaseModel
          extend OpenAI::Internal::Type::RequestParameters::Converter
          include OpenAI::Internal::Type::RequestParameters

          # @!attribute fine_tuned_model_checkpoint
          #
          #   @return [String]
          required :fine_tuned_model_checkpoint, String

          # @!method initialize(fine_tuned_model_checkpoint:, request_options: {})
          #   @param fine_tuned_model_checkpoint [String]
          #   @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
        end
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module FineTuning
      module Checkpoints
        # @see OpenAI::Resources::FineTuning::Checkpoints::Permissions#delete
        class PermissionDeleteResponse < OpenAI::Internal::Type::BaseModel
          # @!attribute id
          #   The ID of the fine-tuned model checkpoint permission that was deleted.
          #
          #   @return [String]
          required :id, String

          # @!attribute deleted
          #   Whether the fine-tuned model checkpoint permission was successfully deleted.
          #
          #   @return [Boolean]
          required :deleted, OpenAI::Internal::Type::Boolean

          # @!attribute object
          #   The object type, which is always "checkpoint.permission".
          #
          #   @return [Symbol, :"checkpoint.permission"]
          required :object, const: :"checkpoint.permission"

          # @!method initialize(id:, deleted:, object: :"checkpoint.permission")
          #   @param id [String] The ID of the fine-tuned model checkpoint permission that was deleted.
          #
          #   @param deleted [Boolean] Whether the fine-tuned model checkpoint permission was successfully deleted.
          #
          #   @param object [Symbol, :"checkpoint.permission"] The object type, which is always "checkpoint.permission".
        end
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module FineTuning
      module Checkpoints
        # @see OpenAI::Resources::FineTuning::Checkpoints::Permissions#retrieve
        class PermissionRetrieveParams < OpenAI::Internal::Type::BaseModel
          extend OpenAI::Internal::Type::RequestParameters::Converter
          include OpenAI::Internal::Type::RequestParameters

          # @!attribute after
          #   Identifier for the last permission ID from the previous pagination request.
          #
          #   @return [String, nil]
          optional :after, String

          # @!attribute limit
          #   Number of permissions to retrieve.
          #
          #   @return [Integer, nil]
          optional :limit, Integer

          # @!attribute order
          #   The order in which to retrieve permissions.
          #
          #   @return [Symbol, OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveParams::Order, nil]
          optional :order, enum: -> { OpenAI::FineTuning::Checkpoints::PermissionRetrieveParams::Order }

          # @!attribute project_id
          #   The ID of the project to get permissions for.
          #
          #   @return [String, nil]
          optional :project_id, String

          # @!method initialize(after: nil, limit: nil, order: nil, project_id: nil, request_options: {})
          #   @param after [String] Identifier for the last permission ID from the previous pagination request.
          #
          #   @param limit [Integer] Number of permissions to retrieve.
          #
          #   @param order [Symbol, OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveParams::Order] The order in which to retrieve permissions.
          #
          #   @param project_id [String] The ID of the project to get permissions for.
          #
          #   @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]

          # The order in which to retrieve permissions.
          module Order
            extend OpenAI::Internal::Type::Enum

            ASCENDING = :ascending
            DESCENDING = :descending

            # @!method self.values
            #   @return [Array<Symbol>]
          end
        end
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module FineTuning
      module Checkpoints
        # @see OpenAI::Resources::FineTuning::Checkpoints::Permissions#retrieve
        class PermissionRetrieveResponse < OpenAI::Internal::Type::BaseModel
          # @!attribute data
          #
          #   @return [Array<OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data>]
          required :data,
                   -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data] }

          # @!attribute has_more
          #
          #   @return [Boolean]
          required :has_more, OpenAI::Internal::Type::Boolean

          # @!attribute object
          #
          #   @return [Symbol, :list]
          required :object, const: :list

          # @!attribute first_id
          #
          #   @return [String, nil]
          optional :first_id, String, nil?: true

          # @!attribute last_id
          #
          #   @return [String, nil]
          optional :last_id, String, nil?: true

          # @!method initialize(data:, has_more:, first_id: nil, last_id: nil, object: :list)
          #   @param data [Array<OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse::Data>]
          #   @param has_more [Boolean]
          #   @param first_id [String, nil]
          #   @param last_id [String, nil]
          #   @param object [Symbol, :list]

          class Data < OpenAI::Internal::Type::BaseModel
            # @!attribute id
            #   The permission identifier, which can be referenced in the API endpoints.
            #
            #   @return [String]
            required :id, String

            # @!attribute created_at
            #   The Unix timestamp (in seconds) for when the permission was created.
            #
            #   @return [Integer]
            required :created_at, Integer

            # @!attribute object
            #   The object type, which is always "checkpoint.permission".
            #
            #   @return [Symbol, :"checkpoint.permission"]
            required :object, const: :"checkpoint.permission"

            # @!attribute project_id
            #   The project identifier that the permission is for.
            #
            #   @return [String]
            required :project_id, String

            # @!method initialize(id:, created_at:, project_id:, object: :"checkpoint.permission")
            #   The `checkpoint.permission` object represents a permission for a fine-tuned
            #   model checkpoint.
            #
            #   @param id [String] The permission identifier, which can be referenced in the API endpoints.
            #
            #   @param created_at [Integer] The Unix timestamp (in seconds) for when the permission was created.
            #
            #   @param project_id [String] The project identifier that the permission is for.
            #
            #   @param object [Symbol, :"checkpoint.permission"] The object type, which is always "checkpoint.permission".
          end
        end
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module FineTuning
      # The hyperparameters used for the DPO fine-tuning job.
      class DpoHyperparameters < OpenAI::Internal::Type::BaseModel
        # @!attribute batch_size
        #   Number of examples in each batch. A larger batch size means that model
        #   parameters are updated less frequently, but with lower variance.
        #
        #   @return [Symbol, :auto, Integer, nil]
        optional :batch_size, union: -> { OpenAI::FineTuning::DpoHyperparameters::BatchSize }

        # @!attribute beta
        #   The beta value for the DPO method. A higher beta value will increase the weight
        #   of the penalty between the policy and reference model.
        #
        #   @return [Symbol, :auto, Float, nil]
        optional :beta, union: -> { OpenAI::FineTuning::DpoHyperparameters::Beta }

        # @!attribute learning_rate_multiplier
        #   Scaling factor for the learning rate. A smaller learning rate may be useful to
        #   avoid overfitting.
        #
        #   @return [Symbol, :auto, Float, nil]
        optional :learning_rate_multiplier,
                 union: -> { OpenAI::FineTuning::DpoHyperparameters::LearningRateMultiplier }

        # @!attribute n_epochs
        #   The number of epochs to train the model for. An epoch refers to one full cycle
        #   through the training dataset.
        #
        #   @return [Symbol, :auto, Integer, nil]
        optional :n_epochs, union: -> { OpenAI::FineTuning::DpoHyperparameters::NEpochs }

        # @!method initialize(batch_size: nil, beta: nil, learning_rate_multiplier: nil, n_epochs: nil)
        #   The hyperparameters used for the DPO fine-tuning job.
        #
        #   @param batch_size [Symbol, :auto, Integer] Number of examples in each batch.
        #
        #   @param beta [Symbol, :auto, Float] The beta value for the DPO method.
        #
        #   @param learning_rate_multiplier [Symbol, :auto, Float] Scaling factor for the learning rate.
        #
        #   @param n_epochs [Symbol, :auto, Integer] The number of epochs to train the model for.

        # Number of examples in each batch. A larger batch size means that model
        # parameters are updated less frequently, but with lower variance.
        module BatchSize
          extend OpenAI::Internal::Type::Union

          variant const: :auto

          variant Integer

          # @!method self.variants
          #   @return [Array(Symbol, :auto, Integer)]
        end

        # The beta value for the DPO method. A higher beta value will increase the weight
        # of the penalty between the policy and reference model.
        module Beta
          extend OpenAI::Internal::Type::Union

          variant const: :auto

          variant Float

          # @!method self.variants
          #   @return [Array(Symbol, :auto, Float)]
        end

        # Scaling factor for the learning rate. A smaller learning rate may be useful to
        # avoid overfitting.
        module LearningRateMultiplier
          extend OpenAI::Internal::Type::Union

          variant const: :auto

          variant Float

          # @!method self.variants
          #   @return [Array(Symbol, :auto, Float)]
        end

        # The number of epochs to train the model for. An epoch refers to one full cycle
        # through the training dataset.
        module NEpochs
          extend OpenAI::Internal::Type::Union

          variant const: :auto

          variant Integer

          # @!method self.variants
          #   @return [Array(Symbol, :auto, Integer)]
        end
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module FineTuning
      # Configuration for the DPO fine-tuning method.
      class DpoMethod < OpenAI::Internal::Type::BaseModel
        # @!attribute hyperparameters
        #   The hyperparameters used for the DPO fine-tuning job.
        #
        #   @return [OpenAI::Models::FineTuning::DpoHyperparameters, nil]
        optional :hyperparameters, -> { OpenAI::FineTuning::DpoHyperparameters }

        # @!method initialize(hyperparameters: nil)
        #   Configuration for the DPO fine-tuning method.
        #
        #   @param hyperparameters [OpenAI::Models::FineTuning::DpoHyperparameters] The hyperparameters used for the DPO fine-tuning job.
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module FineTuning
      # @see OpenAI::Resources::FineTuning::Jobs#create
      class FineTuningJob < OpenAI::Internal::Type::BaseModel
        # @!attribute id
        #   The object identifier, which can be referenced in the API endpoints.
        #
        #   @return [String]
        required :id, String

        # @!attribute created_at
        #   The Unix timestamp (in seconds) for when the fine-tuning job was created.
        #
        #   @return [Integer]
        required :created_at, Integer

        # @!attribute error
        #   For fine-tuning jobs that have `failed`, this will contain more information on
        #   the cause of the failure.
        #
        #   @return [OpenAI::Models::FineTuning::FineTuningJob::Error, nil]
        required :error, -> { OpenAI::FineTuning::FineTuningJob::Error }, nil?: true

        # @!attribute fine_tuned_model
        #   The name of the fine-tuned model that is being created. The value will be null
        #   if the fine-tuning job is still running.
        #
        #   @return [String, nil]
        required :fine_tuned_model, String, nil?: true

        # @!attribute finished_at
        #   The Unix timestamp (in seconds) for when the fine-tuning job was finished. The
        #   value will be null if the fine-tuning job is still running.
        #
        #   @return [Integer, nil]
        required :finished_at, Integer, nil?: true

        # @!attribute hyperparameters
        #   The hyperparameters used for the fine-tuning job. This value will only be
        #   returned when running `supervised` jobs.
        #
        #   @return [OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters]
        required :hyperparameters, -> { OpenAI::FineTuning::FineTuningJob::Hyperparameters }

        # @!attribute model
        #   The base model that is being fine-tuned.
        #
        #   @return [String]
        required :model, String

        # @!attribute object
        #   The object type, which is always "fine_tuning.job".
        #
        #   @return [Symbol, :"fine_tuning.job"]
        required :object, const: :"fine_tuning.job"

        # @!attribute organization_id
        #   The organization that owns the fine-tuning job.
        #
        #   @return [String]
        required :organization_id, String

        # @!attribute result_files
        #   The compiled results file ID(s) for the fine-tuning job. You can retrieve the
        #   results with the
        #   [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents).
        #
        #   @return [Array<String>]
        required :result_files, OpenAI::Internal::Type::ArrayOf[String]

        # @!attribute seed
        #   The seed used for the fine-tuning job.
        #
        #   @return [Integer]
        required :seed, Integer

        # @!attribute status
        #   The current status of the fine-tuning job, which can be either
        #   `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`.
        #
        #   @return [Symbol, OpenAI::Models::FineTuning::FineTuningJob::Status]
        required :status, enum: -> { OpenAI::FineTuning::FineTuningJob::Status }

        # @!attribute trained_tokens
        #   The total number of billable tokens processed by this fine-tuning job. The value
        #   will be null if the fine-tuning job is still running.
        #
        #   @return [Integer, nil]
        required :trained_tokens, Integer, nil?: true

        # @!attribute training_file
        #   The file ID used for training. You can retrieve the training data with the
        #   [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents).
        #
        #   @return [String]
        required :training_file, String

        # @!attribute validation_file
        #   The file ID used for validation. You can retrieve the validation results with
        #   the
        #   [Files API](https://platform.openai.com/docs/api-reference/files/retrieve-contents).
        #
        #   @return [String, nil]
        required :validation_file, String, nil?: true

        # @!attribute estimated_finish
        #   The Unix timestamp (in seconds) for when the fine-tuning job is estimated to
        #   finish. The value will be null if the fine-tuning job is not running.
        #
        #   @return [Integer, nil]
        optional :estimated_finish, Integer, nil?: true

        # @!attribute integrations
        #   A list of integrations to enable for this fine-tuning job.
        #
        #   @return [Array<OpenAI::Models::FineTuning::FineTuningJobWandbIntegrationObject>, nil]
        optional :integrations,
                 -> {
                   OpenAI::Internal::Type::ArrayOf[OpenAI::FineTuning::FineTuningJobWandbIntegrationObject]
                 },
                 nil?: true

        # @!attribute metadata
        #   Set of 16 key-value pairs that can be attached to an object. This can be useful
        #   for storing additional information about the object in a structured format, and
        #   querying for objects via API or the dashboard.
        #
        #   Keys are strings with a maximum length of 64 characters. Values are strings with
        #   a maximum length of 512 characters.
        #
        #   @return [Hash{Symbol=>String}, nil]
        optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true

        # @!attribute method_
        #   The method used for fine-tuning.
        #
        #   @return [OpenAI::Models::FineTuning::FineTuningJob::Method, nil]
        optional :method_, -> { OpenAI::FineTuning::FineTuningJob::Method }, api_name: :method

        # @!method initialize(id:, created_at:, error:, fine_tuned_model:, finished_at:, hyperparameters:, model:, organization_id:, result_files:, seed:, status:, trained_tokens:, training_file:, validation_file:, estimated_finish: nil, integrations: nil, metadata: nil, method_: nil, object: :"fine_tuning.job")
        #   The `fine_tuning.job` object represents a fine-tuning job that has been created
        #   through the API.
        #
        #   @param id [String] The object identifier, which can be referenced in the API endpoints.
        #
        #   @param created_at [Integer] The Unix timestamp (in seconds) for when the fine-tuning job was created.
        #
        #   @param error [OpenAI::Models::FineTuning::FineTuningJob::Error, nil] For fine-tuning jobs that have `failed`, more information on the cause.
        #
        #   @param fine_tuned_model [String, nil] The name of the fine-tuned model that is being created; null while running.
        #
        #   @param finished_at [Integer, nil] The Unix timestamp (in seconds) for when the fine-tuning job finished; null while running.
        #
        #   @param hyperparameters [OpenAI::Models::FineTuning::FineTuningJob::Hyperparameters] The hyperparameters used for the fine-tuning job.
        #
        #   @param model [String] The base model that is being fine-tuned.
        #
        #   @param organization_id [String] The organization that owns the fine-tuning job.
        #
        #   @param result_files [Array<String>] The compiled results file ID(s) for the fine-tuning job.
        #
        #   @param seed [Integer] The seed used for the fine-tuning job.
        #
        #   @param status [Symbol, OpenAI::Models::FineTuning::FineTuningJob::Status] The current status of the fine-tuning job.
        #
        #   @param trained_tokens [Integer, nil] The total number of billable tokens processed; null while running.
        #
        #   @param training_file [String] The file ID used for training.
        #
        #   @param validation_file [String, nil] The file ID used for validation.
        #
        #   @param estimated_finish [Integer, nil] The Unix timestamp (in seconds) for the estimated finish; null when not running.
        #
        #   @param integrations [Array<OpenAI::Models::FineTuning::FineTuningJobWandbIntegrationObject>, nil] A list of integrations to enable for this fine-tuning job.
        #
        #   @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object.
        #
        #   @param method_ [OpenAI::Models::FineTuning::FineTuningJob::Method] The method used for fine-tuning.
        #
        #   @param object [Symbol, :"fine_tuning.job"] The object type, which is always "fine_tuning.job".

        # @see OpenAI::Models::FineTuning::FineTuningJob#error
        class Error < OpenAI::Internal::Type::BaseModel
          # @!attribute code
          #   A machine-readable error code.
          #
          #   @return [String]
          required :code, String

          # @!attribute message
          #   A human-readable error message.
          #
          #   @return [String]
          required :message, String

          # @!attribute param
          #   The parameter that was invalid, usually `training_file` or `validation_file`.
          #   This field will be null if the failure was not parameter-specific.
          #
          #   @return [String, nil]
          required :param, String, nil?: true

          # @!method initialize(code:, message:, param:)
          #   For fine-tuning jobs that have `failed`, this will contain more information on
          #   the cause of the failure.
          #
          #   @param code [String] A machine-readable error code.
          #
          #   @param message [String] A human-readable error message.
          #
          #   @param param [String, nil] The parameter that was invalid; null if not parameter-specific.
        end

        # @see OpenAI::Models::FineTuning::FineTuningJob#hyperparameters
        class Hyperparameters < OpenAI::Internal::Type::BaseModel
          # @!attribute batch_size
          #   Number of examples in each batch. A larger batch size means that model
          #   parameters are updated less frequently, but with lower variance.
          #
          #   @return [Symbol, :auto, Integer, nil]
          optional :batch_size,
                   union: -> { OpenAI::FineTuning::FineTuningJob::Hyperparameters::BatchSize },
                   nil?: true

          # @!attribute learning_rate_multiplier
          #   Scaling factor for the learning rate. A smaller learning rate may be useful to
          #   avoid overfitting.
          #
          #   @return [Symbol, :auto, Float, nil]
          optional :learning_rate_multiplier,
                   union: -> { OpenAI::FineTuning::FineTuningJob::Hyperparameters::LearningRateMultiplier }

          # @!attribute n_epochs
          #   The number of epochs to train the model for. An epoch refers to one full cycle
          #   through the training dataset.
          #
          #   @return [Symbol, :auto, Integer, nil]
          optional :n_epochs, union: -> { OpenAI::FineTuning::FineTuningJob::Hyperparameters::NEpochs }

          # @!method initialize(batch_size: nil, learning_rate_multiplier: nil, n_epochs: nil)
          #   The hyperparameters used for the fine-tuning job. This value will only be
          #   returned when running `supervised` jobs.
          #
          #   @param batch_size [Symbol, :auto, Integer, nil] Number of examples in each batch.
          #
          #   @param learning_rate_multiplier [Symbol, :auto, Float] Scaling factor for the learning rate.
          #
          #   @param n_epochs [Symbol, :auto, Integer] The number of epochs to train the model for.

          # Number of examples in each batch. A larger batch size means that model
          # parameters are updated less frequently, but with lower variance.
          module BatchSize
            extend OpenAI::Internal::Type::Union

            variant const: :auto

            variant Integer

            # @!method self.variants
            #   @return [Array(Symbol, :auto, Integer)]
          end

          # Scaling factor for the learning rate. A smaller learning rate may be useful to
          # avoid overfitting.
          module LearningRateMultiplier
            extend OpenAI::Internal::Type::Union

            variant const: :auto

            variant Float

            # @!method self.variants
            #   @return [Array(Symbol, :auto, Float)]
          end

          # The number of epochs to train the model for. An epoch refers to one full cycle
          # through the training dataset.
          module NEpochs
            extend OpenAI::Internal::Type::Union

            variant const: :auto

            variant Integer

            # @!method self.variants
            #   @return [Array(Symbol, :auto, Integer)]
          end
        end

        # The current status of the fine-tuning job, which can be either
        # `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`.
        #
        # @see OpenAI::Models::FineTuning::FineTuningJob#status
        module Status
          extend OpenAI::Internal::Type::Enum

          VALIDATING_FILES = :validating_files
          QUEUED = :queued
          RUNNING = :running
          SUCCEEDED = :succeeded
          FAILED = :failed
          CANCELLED = :cancelled

          # @!method self.values
          #   @return [Array<Symbol>]
        end

        # @see OpenAI::Models::FineTuning::FineTuningJob#method_
        class Method < OpenAI::Internal::Type::BaseModel
          # @!attribute type
          #   The type of method. Is either `supervised`, `dpo`, or `reinforcement`.
          #
          #   @return [Symbol, OpenAI::Models::FineTuning::FineTuningJob::Method::Type]
          required :type, enum: -> { OpenAI::FineTuning::FineTuningJob::Method::Type }

          # @!attribute dpo
          #   Configuration for the DPO fine-tuning method.
          #
          #   @return [OpenAI::Models::FineTuning::DpoMethod, nil]
          optional :dpo, -> { OpenAI::FineTuning::DpoMethod }

          # @!attribute reinforcement
          #   Configuration for the reinforcement fine-tuning method.
          #
          #   @return [OpenAI::Models::FineTuning::ReinforcementMethod, nil]
          optional :reinforcement, -> { OpenAI::FineTuning::ReinforcementMethod }

          # @!attribute supervised
          #   Configuration for the supervised fine-tuning method.
          #
          #   @return [OpenAI::Models::FineTuning::SupervisedMethod, nil]
          optional :supervised, -> { OpenAI::FineTuning::SupervisedMethod }

          # @!method initialize(type:, dpo: nil, reinforcement: nil, supervised: nil)
          #   The method used for fine-tuning.
          #
          #   @param type [Symbol, OpenAI::Models::FineTuning::FineTuningJob::Method::Type] The type of method. Is either `supervised`, `dpo`, or `reinforcement`.
          #
          #   @param dpo [OpenAI::Models::FineTuning::DpoMethod] Configuration for the DPO fine-tuning method.
          #
          #   @param reinforcement [OpenAI::Models::FineTuning::ReinforcementMethod] Configuration for the reinforcement fine-tuning method.
          #
          #   @param supervised [OpenAI::Models::FineTuning::SupervisedMethod] Configuration for the supervised fine-tuning method.

          # The type of method. Is either `supervised`, `dpo`, or `reinforcement`.
          #
          # @see OpenAI::Models::FineTuning::FineTuningJob::Method#type
          module Type
            extend OpenAI::Internal::Type::Enum

            SUPERVISED = :supervised
            DPO = :dpo
            REINFORCEMENT = :reinforcement

            # @!method self.values
            #   @return [Array<Symbol>]
          end
        end
      end
    end

    FineTuningJob = FineTuning::FineTuningJob
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module FineTuning
      # @see OpenAI::Resources::FineTuning::Jobs#list_events
      class FineTuningJobEvent < OpenAI::Internal::Type::BaseModel
        # @!attribute id
        #   The object identifier.
        #
        #   @return [String]
        required :id, String

        # @!attribute created_at
        #   The Unix timestamp (in seconds) for when the fine-tuning job was created.
        #
        #   @return [Integer]
        required :created_at, Integer

        # @!attribute level
        #   The log level of the event.
        #
        #   @return [Symbol, OpenAI::Models::FineTuning::FineTuningJobEvent::Level]
        required :level, enum: -> { OpenAI::FineTuning::FineTuningJobEvent::Level }

        # @!attribute message
        #   The message of the event.
        #
        #   @return [String]
        required :message, String

        # @!attribute object
        #   The object type, which is always "fine_tuning.job.event".
        #
        #   @return [Symbol, :"fine_tuning.job.event"]
        required :object, const: :"fine_tuning.job.event"

        # @!attribute data
        #   The data associated with the event.
        #
        #   @return [Object, nil]
        optional :data, OpenAI::Internal::Type::Unknown

        # @!attribute type
        #   The type of event.
        #
        #   @return [Symbol, OpenAI::Models::FineTuning::FineTuningJobEvent::Type, nil]
        optional :type, enum: -> { OpenAI::FineTuning::FineTuningJobEvent::Type }

        # @!method initialize(id:, created_at:, level:, message:, data: nil, type: nil, object: :"fine_tuning.job.event")
        #   Fine-tuning job event object
        #
        #   @param id [String] The object identifier.
        #
        #   @param created_at [Integer] The Unix timestamp (in seconds) for when the fine-tuning job was created.
        #
        #   @param level [Symbol, OpenAI::Models::FineTuning::FineTuningJobEvent::Level] The log level of the event.
        #
        #   @param message [String] The message of the event.
        #
        #   @param data [Object] The data associated with the event.
        #
        #   @param type [Symbol, OpenAI::Models::FineTuning::FineTuningJobEvent::Type] The type of event.
        #
        #   @param object [Symbol, :"fine_tuning.job.event"] The object type, which is always "fine_tuning.job.event".

        # The log level of the event.
        #
        # @see OpenAI::Models::FineTuning::FineTuningJobEvent#level
        module Level
          extend OpenAI::Internal::Type::Enum

          INFO = :info
          WARN = :warn
          ERROR = :error

          # @!method self.values
          #   @return [Array<Symbol>]
        end

        # The type of event.
        #
        # @see OpenAI::Models::FineTuning::FineTuningJobEvent#type
        module Type
          extend OpenAI::Internal::Type::Enum

          MESSAGE = :message
          METRICS = :metrics

          # @!method self.values
          #   @return [Array<Symbol>]
        end
      end
    end

    FineTuningJobEvent = FineTuning::FineTuningJobEvent
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module FineTuning
-
1
FineTuningJobIntegration = OpenAI::Models::FineTuning::FineTuningJobWandbIntegrationObject
-
end
-
-
1
FineTuningJobIntegration = FineTuning::FineTuningJobIntegration
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module FineTuning
-
1
class FineTuningJobWandbIntegration < OpenAI::Internal::Type::BaseModel
-
# @!attribute project
-
# The name of the project that the new run will be created under.
-
#
-
# @return [String]
-
1
required :project, String
-
-
# @!attribute entity
-
# The entity to use for the run. This allows you to set the team or username of
-
# the WandB user that you would like associated with the run. If not set, the
-
# default entity for the registered WandB API key is used.
-
#
-
# @return [String, nil]
-
1
optional :entity, String, nil?: true
-
-
# @!attribute name
-
# A display name to set for the run. If not set, we will use the Job ID as the
-
# name.
-
#
-
# @return [String, nil]
-
1
optional :name, String, nil?: true
-
-
# @!attribute tags
-
# A list of tags to be attached to the newly created run. These tags are passed
-
# through directly to WandB. Some default tags are generated by OpenAI:
-
# "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}".
-
#
-
# @return [Array<String>, nil]
-
1
optional :tags, OpenAI::Internal::Type::ArrayOf[String]
-
-
# @!method initialize(project:, entity: nil, name: nil, tags: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::FineTuning::FineTuningJobWandbIntegration} for more details.
-
#
-
# The settings for your integration with Weights and Biases. This payload
-
# specifies the project that metrics will be sent to. Optionally, you can set an
-
# explicit display name for your run, add tags to your run, and set a default
-
# entity (team, username, etc) to be associated with your run.
-
#
-
# @param project [String] The name of the project that the new run will be created under.
-
#
-
# @param entity [String, nil] The entity to use for the run. This allows you to set the team or username of th
-
#
-
# @param name [String, nil] A display name to set for the run. If not set, we will use the Job ID as the nam
-
#
-
# @param tags [Array<String>] A list of tags to be attached to the newly created run. These tags are passed th
-
end
-
end
-
-
1
FineTuningJobWandbIntegration = FineTuning::FineTuningJobWandbIntegration
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module FineTuning
-
1
class FineTuningJobWandbIntegrationObject < OpenAI::Internal::Type::BaseModel
-
# @!attribute type
-
# The type of the integration being enabled for the fine-tuning job
-
#
-
# @return [Symbol, :wandb]
-
1
required :type, const: :wandb
-
-
# @!attribute wandb
-
# The settings for your integration with Weights and Biases. This payload
-
# specifies the project that metrics will be sent to. Optionally, you can set an
-
# explicit display name for your run, add tags to your run, and set a default
-
# entity (team, username, etc) to be associated with your run.
-
#
-
# @return [OpenAI::Models::FineTuning::FineTuningJobWandbIntegration]
-
1
required :wandb, -> { OpenAI::FineTuning::FineTuningJobWandbIntegration }
-
-
# @!method initialize(wandb:, type: :wandb)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::FineTuning::FineTuningJobWandbIntegrationObject} for more
-
# details.
-
#
-
# @param wandb [OpenAI::Models::FineTuning::FineTuningJobWandbIntegration] The settings for your integration with Weights and Biases. This payload specifie
-
#
-
# @param type [Symbol, :wandb] The type of the integration being enabled for the fine-tuning job
-
end
-
end
-
-
1
FineTuningJobWandbIntegrationObject = FineTuning::FineTuningJobWandbIntegrationObject
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module FineTuning
-
# @see OpenAI::Resources::FineTuning::Jobs#cancel
-
1
class JobCancelParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!method initialize(request_options: {})
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module FineTuning
-
# @see OpenAI::Resources::FineTuning::Jobs#create
-
1
class JobCreateParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!attribute model
-
# The name of the model to fine-tune. You can select one of the
-
# [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned).
-
#
-
# @return [String, Symbol, OpenAI::Models::FineTuning::JobCreateParams::Model]
-
1
required :model, union: -> { OpenAI::FineTuning::JobCreateParams::Model }
-
-
# @!attribute training_file
-
# The ID of an uploaded file that contains training data.
-
#
-
# See [upload file](https://platform.openai.com/docs/api-reference/files/create)
-
# for how to upload a file.
-
#
-
# Your dataset must be formatted as a JSONL file. Additionally, you must upload
-
# your file with the purpose `fine-tune`.
-
#
-
# The contents of the file should differ depending on if the model uses the
-
# [chat](https://platform.openai.com/docs/api-reference/fine-tuning/chat-input),
-
# [completions](https://platform.openai.com/docs/api-reference/fine-tuning/completions-input)
-
# format, or if the fine-tuning method uses the
-
# [preference](https://platform.openai.com/docs/api-reference/fine-tuning/preference-input)
-
# format.
-
#
-
# See the
-
# [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization)
-
# for more details.
-
#
-
# @return [String]
-
1
required :training_file, String
-
-
# @!attribute hyperparameters
-
# @deprecated
-
#
-
# The hyperparameters used for the fine-tuning job. This value is now deprecated
-
# in favor of `method`, and should be passed in under the `method` parameter.
-
#
-
# @return [OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters, nil]
-
1
optional :hyperparameters, -> { OpenAI::FineTuning::JobCreateParams::Hyperparameters }
-
-
# @!attribute integrations
-
# A list of integrations to enable for your fine-tuning job.
-
#
-
# @return [Array<OpenAI::Models::FineTuning::JobCreateParams::Integration>, nil]
-
1
optional :integrations,
-
-> { OpenAI::Internal::Type::ArrayOf[OpenAI::FineTuning::JobCreateParams::Integration] },
-
nil?: true
-
-
# @!attribute metadata
-
# Set of 16 key-value pairs that can be attached to an object. This can be useful
-
# for storing additional information about the object in a structured format, and
-
# querying for objects via API or the dashboard.
-
#
-
# Keys are strings with a maximum length of 64 characters. Values are strings with
-
# a maximum length of 512 characters.
-
#
-
# @return [Hash{Symbol=>String}, nil]
-
1
optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
-
-
# @!attribute method_
-
# The method used for fine-tuning.
-
#
-
# @return [OpenAI::Models::FineTuning::JobCreateParams::Method, nil]
-
1
optional :method_, -> { OpenAI::FineTuning::JobCreateParams::Method }, api_name: :method
-
-
# @!attribute seed
-
# The seed controls the reproducibility of the job. Passing in the same seed and
-
# job parameters should produce the same results, but may differ in rare cases. If
-
# a seed is not specified, one will be generated for you.
-
#
-
# @return [Integer, nil]
-
1
optional :seed, Integer, nil?: true
-
-
# @!attribute suffix
-
# A string of up to 64 characters that will be added to your fine-tuned model
-
# name.
-
#
-
# For example, a `suffix` of "custom-model-name" would produce a model name like
-
# `ft:gpt-4o-mini:openai:custom-model-name:7p4lURel`.
-
#
-
# @return [String, nil]
-
1
optional :suffix, String, nil?: true
-
-
# @!attribute validation_file
-
# The ID of an uploaded file that contains validation data.
-
#
-
# If you provide this file, the data is used to generate validation metrics
-
# periodically during fine-tuning. These metrics can be viewed in the fine-tuning
-
# results file. The same data should not be present in both train and validation
-
# files.
-
#
-
# Your dataset must be formatted as a JSONL file. You must upload your file with
-
# the purpose `fine-tune`.
-
#
-
# See the
-
# [fine-tuning guide](https://platform.openai.com/docs/guides/model-optimization)
-
# for more details.
-
#
-
# @return [String, nil]
-
1
optional :validation_file, String, nil?: true
-
-
# @!method initialize(model:, training_file:, hyperparameters: nil, integrations: nil, metadata: nil, method_: nil, seed: nil, suffix: nil, validation_file: nil, request_options: {})
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::FineTuning::JobCreateParams} for more details.
-
#
-
# @param model [String, Symbol, OpenAI::Models::FineTuning::JobCreateParams::Model] The name of the model to fine-tune. You can select one of the
-
#
-
# @param training_file [String] The ID of an uploaded file that contains training data.
-
#
-
# @param hyperparameters [OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters] The hyperparameters used for the fine-tuning job.
-
#
-
# @param integrations [Array<OpenAI::Models::FineTuning::JobCreateParams::Integration>, nil] A list of integrations to enable for your fine-tuning job.
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
#
-
# @param method_ [OpenAI::Models::FineTuning::JobCreateParams::Method] The method used for fine-tuning.
-
#
-
# @param seed [Integer, nil] The seed controls the reproducibility of the job. Passing in the same seed and j
-
#
-
# @param suffix [String, nil] A string of up to 64 characters that will be added to your fine-tuned model name
-
#
-
# @param validation_file [String, nil] The ID of an uploaded file that contains validation data.
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
-
# The name of the model to fine-tune. You can select one of the
-
# [supported models](https://platform.openai.com/docs/guides/fine-tuning#which-models-can-be-fine-tuned).
-
1
module Model
-
1
extend OpenAI::Internal::Type::Union
-
-
1
variant String
-
-
1
variant const: -> { OpenAI::Models::FineTuning::JobCreateParams::Model::BABBAGE_002 }
-
-
1
variant const: -> { OpenAI::Models::FineTuning::JobCreateParams::Model::DAVINCI_002 }
-
-
1
variant const: -> { OpenAI::Models::FineTuning::JobCreateParams::Model::GPT_3_5_TURBO }
-
-
1
variant const: -> { OpenAI::Models::FineTuning::JobCreateParams::Model::GPT_4O_MINI }
-
-
# @!method self.variants
-
# @return [Array(String, Symbol)]
-
-
1
define_sorbet_constant!(:Variants) do
-
T.type_alias { T.any(String, OpenAI::FineTuning::JobCreateParams::Model::TaggedSymbol) }
-
end
-
-
# @!group
-
-
1
BABBAGE_002 = :"babbage-002"
-
1
DAVINCI_002 = :"davinci-002"
-
1
GPT_3_5_TURBO = :"gpt-3.5-turbo"
-
1
GPT_4O_MINI = :"gpt-4o-mini"
-
-
# @!endgroup
-
end
-
-
# @deprecated
-
1
class Hyperparameters < OpenAI::Internal::Type::BaseModel
-
# @!attribute batch_size
-
# Number of examples in each batch. A larger batch size means that model
-
# parameters are updated less frequently, but with lower variance.
-
#
-
# @return [Symbol, :auto, Integer, nil]
-
1
optional :batch_size, union: -> { OpenAI::FineTuning::JobCreateParams::Hyperparameters::BatchSize }
-
-
# @!attribute learning_rate_multiplier
-
# Scaling factor for the learning rate. A smaller learning rate may be useful to
-
# avoid overfitting.
-
#
-
# @return [Symbol, :auto, Float, nil]
-
1
optional :learning_rate_multiplier,
-
union: -> { OpenAI::FineTuning::JobCreateParams::Hyperparameters::LearningRateMultiplier }
-
-
# @!attribute n_epochs
-
# The number of epochs to train the model for. An epoch refers to one full cycle
-
# through the training dataset.
-
#
-
# @return [Symbol, :auto, Integer, nil]
-
1
optional :n_epochs, union: -> { OpenAI::FineTuning::JobCreateParams::Hyperparameters::NEpochs }
-
-
# @!method initialize(batch_size: nil, learning_rate_multiplier: nil, n_epochs: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters} for more details.
-
#
-
# The hyperparameters used for the fine-tuning job. This value is now deprecated
-
# in favor of `method`, and should be passed in under the `method` parameter.
-
#
-
# @param batch_size [Symbol, :auto, Integer] Number of examples in each batch. A larger batch size means that model parameter
-
#
-
# @param learning_rate_multiplier [Symbol, :auto, Float] Scaling factor for the learning rate. A smaller learning rate may be useful to a
-
#
-
# @param n_epochs [Symbol, :auto, Integer] The number of epochs to train the model for. An epoch refers to one full cycle
-
-
# Number of examples in each batch. A larger batch size means that model
-
# parameters are updated less frequently, but with lower variance.
-
#
-
# @see OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters#batch_size
-
1
module BatchSize
-
1
extend OpenAI::Internal::Type::Union
-
-
1
variant const: :auto
-
-
1
variant Integer
-
-
# @!method self.variants
-
# @return [Array(Symbol, :auto, Integer)]
-
end
-
-
# Scaling factor for the learning rate. A smaller learning rate may be useful to
-
# avoid overfitting.
-
#
-
# @see OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters#learning_rate_multiplier
-
1
module LearningRateMultiplier
-
1
extend OpenAI::Internal::Type::Union
-
-
1
variant const: :auto
-
-
1
variant Float
-
-
# @!method self.variants
-
# @return [Array(Symbol, :auto, Float)]
-
end
-
-
# The number of epochs to train the model for. An epoch refers to one full cycle
-
# through the training dataset.
-
#
-
# @see OpenAI::Models::FineTuning::JobCreateParams::Hyperparameters#n_epochs
-
1
module NEpochs
-
1
extend OpenAI::Internal::Type::Union
-
-
1
variant const: :auto
-
-
1
variant Integer
-
-
# @!method self.variants
-
# @return [Array(Symbol, :auto, Integer)]
-
end
-
end
-
-
1
class Integration < OpenAI::Internal::Type::BaseModel
-
# @!attribute type
-
# The type of integration to enable. Currently, only "wandb" (Weights and Biases)
-
# is supported.
-
#
-
# @return [Symbol, :wandb]
-
1
required :type, const: :wandb
-
-
# @!attribute wandb
-
# The settings for your integration with Weights and Biases. This payload
-
# specifies the project that metrics will be sent to. Optionally, you can set an
-
# explicit display name for your run, add tags to your run, and set a default
-
# entity (team, username, etc) to be associated with your run.
-
#
-
# @return [OpenAI::Models::FineTuning::JobCreateParams::Integration::Wandb]
-
1
required :wandb, -> { OpenAI::FineTuning::JobCreateParams::Integration::Wandb }
-
-
# @!method initialize(wandb:, type: :wandb)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::FineTuning::JobCreateParams::Integration} for more details.
-
#
-
# @param wandb [OpenAI::Models::FineTuning::JobCreateParams::Integration::Wandb] The settings for your integration with Weights and Biases. This payload specifie
-
#
-
# @param type [Symbol, :wandb] The type of integration to enable. Currently, only "wandb" (Weights and Biases)
-
-
# @see OpenAI::Models::FineTuning::JobCreateParams::Integration#wandb
-
1
class Wandb < OpenAI::Internal::Type::BaseModel
-
# @!attribute project
-
# The name of the project that the new run will be created under.
-
#
-
# @return [String]
-
1
required :project, String
-
-
# @!attribute entity
-
# The entity to use for the run. This allows you to set the team or username of
-
# the WandB user that you would like associated with the run. If not set, the
-
# default entity for the registered WandB API key is used.
-
#
-
# @return [String, nil]
-
1
optional :entity, String, nil?: true
-
-
# @!attribute name
-
# A display name to set for the run. If not set, we will use the Job ID as the
-
# name.
-
#
-
# @return [String, nil]
-
1
optional :name, String, nil?: true
-
-
# @!attribute tags
-
# A list of tags to be attached to the newly created run. These tags are passed
-
# through directly to WandB. Some default tags are generated by OpenAI:
-
# "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}".
-
#
-
# @return [Array<String>, nil]
-
1
optional :tags, OpenAI::Internal::Type::ArrayOf[String]
-
-
# @!method initialize(project:, entity: nil, name: nil, tags: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::FineTuning::JobCreateParams::Integration::Wandb} for more
-
# details.
-
#
-
# The settings for your integration with Weights and Biases. This payload
-
# specifies the project that metrics will be sent to. Optionally, you can set an
-
# explicit display name for your run, add tags to your run, and set a default
-
# entity (team, username, etc) to be associated with your run.
-
#
-
# @param project [String] The name of the project that the new run will be created under.
-
#
-
# @param entity [String, nil] The entity to use for the run. This allows you to set the team or username of th
-
#
-
# @param name [String, nil] A display name to set for the run. If not set, we will use the Job ID as the nam
-
#
-
# @param tags [Array<String>] A list of tags to be attached to the newly created run. These tags are passed th
-
end
-
end
-
-
1
class Method < OpenAI::Internal::Type::BaseModel
-
# @!attribute type
-
# The type of method. Is either `supervised`, `dpo`, or `reinforcement`.
-
#
-
# @return [Symbol, OpenAI::Models::FineTuning::JobCreateParams::Method::Type]
-
1
required :type, enum: -> { OpenAI::FineTuning::JobCreateParams::Method::Type }
-
-
# @!attribute dpo
-
# Configuration for the DPO fine-tuning method.
-
#
-
# @return [OpenAI::Models::FineTuning::DpoMethod, nil]
-
1
optional :dpo, -> { OpenAI::FineTuning::DpoMethod }
-
-
# @!attribute reinforcement
-
# Configuration for the reinforcement fine-tuning method.
-
#
-
# @return [OpenAI::Models::FineTuning::ReinforcementMethod, nil]
-
1
optional :reinforcement, -> { OpenAI::FineTuning::ReinforcementMethod }
-
-
# @!attribute supervised
-
# Configuration for the supervised fine-tuning method.
-
#
-
# @return [OpenAI::Models::FineTuning::SupervisedMethod, nil]
-
1
optional :supervised, -> { OpenAI::FineTuning::SupervisedMethod }
-
-
# @!method initialize(type:, dpo: nil, reinforcement: nil, supervised: nil)
-
# The method used for fine-tuning.
-
#
-
# @param type [Symbol, OpenAI::Models::FineTuning::JobCreateParams::Method::Type] The type of method. Is either `supervised`, `dpo`, or `reinforcement`.
-
#
-
# @param dpo [OpenAI::Models::FineTuning::DpoMethod] Configuration for the DPO fine-tuning method.
-
#
-
# @param reinforcement [OpenAI::Models::FineTuning::ReinforcementMethod] Configuration for the reinforcement fine-tuning method.
-
#
-
# @param supervised [OpenAI::Models::FineTuning::SupervisedMethod] Configuration for the supervised fine-tuning method.
-
-
# The type of method. Is either `supervised`, `dpo`, or `reinforcement`.
-
#
-
# @see OpenAI::Models::FineTuning::JobCreateParams::Method#type
-
1
module Type
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
SUPERVISED = :supervised
-
1
DPO = :dpo
-
1
REINFORCEMENT = :reinforcement
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module FineTuning
-
# @see OpenAI::Resources::FineTuning::Jobs#list_events
-
1
class JobListEventsParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!attribute after
-
# Identifier for the last event from the previous pagination request.
-
#
-
# @return [String, nil]
-
1
optional :after, String
-
-
# @!attribute limit
-
# Number of events to retrieve.
-
#
-
# @return [Integer, nil]
-
1
optional :limit, Integer
-
-
# @!method initialize(after: nil, limit: nil, request_options: {})
-
# @param after [String] Identifier for the last event from the previous pagination request.
-
#
-
# @param limit [Integer] Number of events to retrieve.
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module FineTuning
-
# @see OpenAI::Resources::FineTuning::Jobs#list
-
1
class JobListParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!attribute after
-
# Identifier for the last job from the previous pagination request.
-
#
-
# @return [String, nil]
-
1
optional :after, String
-
-
# @!attribute limit
-
# Number of fine-tuning jobs to retrieve.
-
#
-
# @return [Integer, nil]
-
1
optional :limit, Integer
-
-
# @!attribute metadata
-
# Optional metadata filter. To filter, use the syntax `metadata[k]=v`.
-
# Alternatively, set `metadata=null` to indicate no metadata.
-
#
-
# @return [Hash{Symbol=>String}, nil]
-
1
optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
-
-
# @!method initialize(after: nil, limit: nil, metadata: nil, request_options: {})
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::FineTuning::JobListParams} for more details.
-
#
-
# @param after [String] Identifier for the last job from the previous pagination request.
-
#
-
# @param limit [Integer] Number of fine-tuning jobs to retrieve.
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] Optional metadata filter. To filter, use the syntax `metadata[k]=v`. Alternative
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module FineTuning
-
# @see OpenAI::Resources::FineTuning::Jobs#pause
-
1
class JobPauseParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!method initialize(request_options: {})
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module FineTuning
-
# @see OpenAI::Resources::FineTuning::Jobs#resume
-
1
class JobResumeParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!method initialize(request_options: {})
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module FineTuning
-
# @see OpenAI::Resources::FineTuning::Jobs#retrieve
-
1
class JobRetrieveParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!method initialize(request_options: {})
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module FineTuning
-
1
module Jobs
-
# @see OpenAI::Resources::FineTuning::Jobs::Checkpoints#list
-
1
class CheckpointListParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!attribute after
-
# Identifier for the last checkpoint ID from the previous pagination request.
-
#
-
# @return [String, nil]
-
1
optional :after, String
-
-
# @!attribute limit
-
# Number of checkpoints to retrieve.
-
#
-
# @return [Integer, nil]
-
1
optional :limit, Integer
-
-
# @!method initialize(after: nil, limit: nil, request_options: {})
-
# @param after [String] Identifier for the last checkpoint ID from the previous pagination request.
-
#
-
# @param limit [Integer] Number of checkpoints to retrieve.
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module FineTuning
-
1
module Jobs
-
# @see OpenAI::Resources::FineTuning::Jobs::Checkpoints#list
-
1
class FineTuningJobCheckpoint < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The checkpoint identifier, which can be referenced in the API endpoints.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute created_at
-
# The Unix timestamp (in seconds) for when the checkpoint was created.
-
#
-
# @return [Integer]
-
1
required :created_at, Integer
-
-
# @!attribute fine_tuned_model_checkpoint
-
# The name of the fine-tuned checkpoint model that is created.
-
#
-
# @return [String]
-
1
required :fine_tuned_model_checkpoint, String
-
-
# @!attribute fine_tuning_job_id
-
# The name of the fine-tuning job that this checkpoint was created from.
-
#
-
# @return [String]
-
1
required :fine_tuning_job_id, String
-
-
# @!attribute metrics
-
# Metrics at the step number during the fine-tuning job.
-
#
-
# @return [OpenAI::Models::FineTuning::Jobs::FineTuningJobCheckpoint::Metrics]
-
1
required :metrics, -> { OpenAI::FineTuning::Jobs::FineTuningJobCheckpoint::Metrics }
-
-
# @!attribute object
-
# The object type, which is always "fine_tuning.job.checkpoint".
-
#
-
# @return [Symbol, :"fine_tuning.job.checkpoint"]
-
1
required :object, const: :"fine_tuning.job.checkpoint"
-
-
# @!attribute step_number
-
# The step number that the checkpoint was created at.
-
#
-
# @return [Integer]
-
1
required :step_number, Integer
-
-
# @!method initialize(id:, created_at:, fine_tuned_model_checkpoint:, fine_tuning_job_id:, metrics:, step_number:, object: :"fine_tuning.job.checkpoint")
-
# The `fine_tuning.job.checkpoint` object represents a model checkpoint for a
-
# fine-tuning job that is ready to use.
-
#
-
# @param id [String] The checkpoint identifier, which can be referenced in the API endpoints.
-
#
-
# @param created_at [Integer] The Unix timestamp (in seconds) for when the checkpoint was created.
-
#
-
# @param fine_tuned_model_checkpoint [String] The name of the fine-tuned checkpoint model that is created.
-
#
-
# @param fine_tuning_job_id [String] The name of the fine-tuning job that this checkpoint was created from.
-
#
-
# @param metrics [OpenAI::Models::FineTuning::Jobs::FineTuningJobCheckpoint::Metrics] Metrics at the step number during the fine-tuning job.
-
#
-
# @param step_number [Integer] The step number that the checkpoint was created at.
-
#
-
# @param object [Symbol, :"fine_tuning.job.checkpoint"] The object type, which is always "fine_tuning.job.checkpoint".
-
-
# @see OpenAI::Models::FineTuning::Jobs::FineTuningJobCheckpoint#metrics
-
1
class Metrics < OpenAI::Internal::Type::BaseModel
-
# @!attribute full_valid_loss
-
#
-
# @return [Float, nil]
-
1
optional :full_valid_loss, Float
-
-
# @!attribute full_valid_mean_token_accuracy
-
#
-
# @return [Float, nil]
-
1
optional :full_valid_mean_token_accuracy, Float
-
-
# @!attribute step
-
#
-
# @return [Float, nil]
-
1
optional :step, Float
-
-
# @!attribute train_loss
-
#
-
# @return [Float, nil]
-
1
optional :train_loss, Float
-
-
# @!attribute train_mean_token_accuracy
-
#
-
# @return [Float, nil]
-
1
optional :train_mean_token_accuracy, Float
-
-
# @!attribute valid_loss
-
#
-
# @return [Float, nil]
-
1
optional :valid_loss, Float
-
-
# @!attribute valid_mean_token_accuracy
-
#
-
# @return [Float, nil]
-
1
optional :valid_mean_token_accuracy, Float
-
-
# @!method initialize(full_valid_loss: nil, full_valid_mean_token_accuracy: nil, step: nil, train_loss: nil, train_mean_token_accuracy: nil, valid_loss: nil, valid_mean_token_accuracy: nil)
-
# Metrics at the step number during the fine-tuning job.
-
#
-
# @param full_valid_loss [Float]
-
# @param full_valid_mean_token_accuracy [Float]
-
# @param step [Float]
-
# @param train_loss [Float]
-
# @param train_mean_token_accuracy [Float]
-
# @param valid_loss [Float]
-
# @param valid_mean_token_accuracy [Float]
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module FineTuning
-
1
class ReinforcementHyperparameters < OpenAI::Internal::Type::BaseModel
-
# @!attribute batch_size
-
# Number of examples in each batch. A larger batch size means that model
-
# parameters are updated less frequently, but with lower variance.
-
#
-
# @return [Symbol, :auto, Integer, nil]
-
1
optional :batch_size, union: -> { OpenAI::FineTuning::ReinforcementHyperparameters::BatchSize }
-
-
# @!attribute compute_multiplier
-
# Multiplier on amount of compute used for exploring search space during training.
-
#
-
# @return [Symbol, :auto, Float, nil]
-
1
optional :compute_multiplier,
-
union: -> { OpenAI::FineTuning::ReinforcementHyperparameters::ComputeMultiplier }
-
-
# @!attribute eval_interval
-
# The number of training steps between evaluation runs.
-
#
-
# @return [Symbol, :auto, Integer, nil]
-
1
optional :eval_interval, union: -> { OpenAI::FineTuning::ReinforcementHyperparameters::EvalInterval }
-
-
# @!attribute eval_samples
-
# Number of evaluation samples to generate per training step.
-
#
-
# @return [Symbol, :auto, Integer, nil]
-
1
optional :eval_samples, union: -> { OpenAI::FineTuning::ReinforcementHyperparameters::EvalSamples }
-
-
# @!attribute learning_rate_multiplier
-
# Scaling factor for the learning rate. A smaller learning rate may be useful to
-
# avoid overfitting.
-
#
-
# @return [Symbol, :auto, Float, nil]
-
1
optional :learning_rate_multiplier,
-
union: -> { OpenAI::FineTuning::ReinforcementHyperparameters::LearningRateMultiplier }
-
-
# @!attribute n_epochs
-
# The number of epochs to train the model for. An epoch refers to one full cycle
-
# through the training dataset.
-
#
-
# @return [Symbol, :auto, Integer, nil]
-
1
optional :n_epochs, union: -> { OpenAI::FineTuning::ReinforcementHyperparameters::NEpochs }
-
-
# @!attribute reasoning_effort
-
# Level of reasoning effort.
-
#
-
# @return [Symbol, OpenAI::Models::FineTuning::ReinforcementHyperparameters::ReasoningEffort, nil]
-
1
optional :reasoning_effort,
-
enum: -> {
-
OpenAI::FineTuning::ReinforcementHyperparameters::ReasoningEffort
-
}
-
-
# @!method initialize(batch_size: nil, compute_multiplier: nil, eval_interval: nil, eval_samples: nil, learning_rate_multiplier: nil, n_epochs: nil, reasoning_effort: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::FineTuning::ReinforcementHyperparameters} for more details.
-
#
-
# The hyperparameters used for the reinforcement fine-tuning job.
-
#
-
# @param batch_size [Symbol, :auto, Integer] Number of examples in each batch. A larger batch size means that model parameter
-
#
-
# @param compute_multiplier [Symbol, :auto, Float] Multiplier on amount of compute used for exploring search space during training.
-
#
-
# @param eval_interval [Symbol, :auto, Integer] The number of training steps between evaluation runs.
-
#
-
# @param eval_samples [Symbol, :auto, Integer] Number of evaluation samples to generate per training step.
-
#
-
# @param learning_rate_multiplier [Symbol, :auto, Float] Scaling factor for the learning rate. A smaller learning rate may be useful to a
-
#
-
# @param n_epochs [Symbol, :auto, Integer] The number of epochs to train the model for. An epoch refers to one full cycle t
-
#
-
# @param reasoning_effort [Symbol, OpenAI::Models::FineTuning::ReinforcementHyperparameters::ReasoningEffort] Level of reasoning effort.
-
-
# Number of examples in each batch. A larger batch size means that model
-
# parameters are updated less frequently, but with lower variance.
-
#
-
# @see OpenAI::Models::FineTuning::ReinforcementHyperparameters#batch_size
-
1
module BatchSize
-
1
extend OpenAI::Internal::Type::Union
-
-
1
variant const: :auto
-
-
1
variant Integer
-
-
# @!method self.variants
-
# @return [Array(Symbol, :auto, Integer)]
-
end
-
-
# Multiplier on amount of compute used for exploring search space during training.
-
#
-
# @see OpenAI::Models::FineTuning::ReinforcementHyperparameters#compute_multiplier
-
1
module ComputeMultiplier
-
1
extend OpenAI::Internal::Type::Union
-
-
1
variant const: :auto
-
-
1
variant Float
-
-
# @!method self.variants
-
# @return [Array(Symbol, :auto, Float)]
-
end
-
-
# The number of training steps between evaluation runs.
-
#
-
# @see OpenAI::Models::FineTuning::ReinforcementHyperparameters#eval_interval
-
1
module EvalInterval
-
1
extend OpenAI::Internal::Type::Union
-
-
1
variant const: :auto
-
-
1
variant Integer
-
-
# @!method self.variants
-
# @return [Array(Symbol, :auto, Integer)]
-
end
-
-
# Number of evaluation samples to generate per training step.
-
#
-
# @see OpenAI::Models::FineTuning::ReinforcementHyperparameters#eval_samples
-
1
module EvalSamples
-
1
extend OpenAI::Internal::Type::Union
-
-
1
variant const: :auto
-
-
1
variant Integer
-
-
# @!method self.variants
-
# @return [Array(Symbol, :auto, Integer)]
-
end
-
-
# Scaling factor for the learning rate. A smaller learning rate may be useful to
-
# avoid overfitting.
-
#
-
# @see OpenAI::Models::FineTuning::ReinforcementHyperparameters#learning_rate_multiplier
-
1
module LearningRateMultiplier
-
1
extend OpenAI::Internal::Type::Union
-
-
1
variant const: :auto
-
-
1
variant Float
-
-
# @!method self.variants
-
# @return [Array(Symbol, :auto, Float)]
-
end
-
-
# The number of epochs to train the model for. An epoch refers to one full cycle
-
# through the training dataset.
-
#
-
# @see OpenAI::Models::FineTuning::ReinforcementHyperparameters#n_epochs
-
1
module NEpochs
-
1
extend OpenAI::Internal::Type::Union
-
-
1
variant const: :auto
-
-
1
variant Integer
-
-
# @!method self.variants
-
# @return [Array(Symbol, :auto, Integer)]
-
end
-
-
# Level of reasoning effort.
-
#
-
# @see OpenAI::Models::FineTuning::ReinforcementHyperparameters#reasoning_effort
-
1
module ReasoningEffort
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
DEFAULT = :default
-
1
LOW = :low
-
1
MEDIUM = :medium
-
1
HIGH = :high
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module FineTuning
      # Configuration for the reinforcement fine-tuning method.
      class ReinforcementMethod < OpenAI::Internal::Type::BaseModel
        # @!attribute grader
        #   The grader used for the fine-tuning job.
        #
        #   @return [OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::MultiGrader]
        required :grader, union: -> { OpenAI::FineTuning::ReinforcementMethod::Grader }

        # @!attribute hyperparameters
        #   The hyperparameters used for the reinforcement fine-tuning job.
        #
        #   @return [OpenAI::Models::FineTuning::ReinforcementHyperparameters, nil]
        optional :hyperparameters, -> { OpenAI::FineTuning::ReinforcementHyperparameters }

        # @!method initialize(grader:, hyperparameters: nil)
        #   Configuration for the reinforcement fine-tuning method.
        #
        #   @param grader [OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::MultiGrader] The grader used for the fine-tuning job.
        #
        #   @param hyperparameters [OpenAI::Models::FineTuning::ReinforcementHyperparameters] The hyperparameters used for the reinforcement fine-tuning job.

        # The grader used for the fine-tuning job.
        #
        # @see OpenAI::Models::FineTuning::ReinforcementMethod#grader
        module Grader
          extend OpenAI::Internal::Type::Union

          # A StringCheckGrader object that performs a string comparison between input and reference using a specified operation.
          variant -> { OpenAI::Graders::StringCheckGrader }

          # A TextSimilarityGrader object which grades text based on similarity metrics.
          variant -> { OpenAI::Graders::TextSimilarityGrader }

          # A PythonGrader object that runs a python script on the input.
          variant -> { OpenAI::Graders::PythonGrader }

          # A ScoreModelGrader object that uses a model to assign a score to the input.
          variant -> { OpenAI::Graders::ScoreModelGrader }

          # A MultiGrader object combines the output of multiple graders to produce a single score.
          variant -> { OpenAI::Graders::MultiGrader }

          # @!method self.variants
          #   @return [Array(OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::MultiGrader)]
        end
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module FineTuning
      # The hyperparameters used for the supervised fine-tuning job.
      class SupervisedHyperparameters < OpenAI::Internal::Type::BaseModel
        # @!attribute batch_size
        #   Number of examples in each batch. A larger batch size means that model
        #   parameters are updated less frequently, but with lower variance.
        #
        #   @return [Symbol, :auto, Integer, nil]
        optional :batch_size, union: -> { OpenAI::FineTuning::SupervisedHyperparameters::BatchSize }

        # @!attribute learning_rate_multiplier
        #   Scaling factor for the learning rate. A smaller learning rate may be useful to
        #   avoid overfitting.
        #
        #   @return [Symbol, :auto, Float, nil]
        optional :learning_rate_multiplier,
                 union: -> { OpenAI::FineTuning::SupervisedHyperparameters::LearningRateMultiplier }

        # @!attribute n_epochs
        #   The number of epochs to train the model for. An epoch refers to one full cycle
        #   through the training dataset.
        #
        #   @return [Symbol, :auto, Integer, nil]
        optional :n_epochs, union: -> { OpenAI::FineTuning::SupervisedHyperparameters::NEpochs }

        # @!method initialize(batch_size: nil, learning_rate_multiplier: nil, n_epochs: nil)
        #   Some parameter documentations has been truncated, see
        #   {OpenAI::Models::FineTuning::SupervisedHyperparameters} for more details.
        #
        #   The hyperparameters used for the fine-tuning job.
        #
        #   @param batch_size [Symbol, :auto, Integer] Number of examples in each batch. A larger batch size means that model parameter
        #
        #   @param learning_rate_multiplier [Symbol, :auto, Float] Scaling factor for the learning rate. A smaller learning rate may be useful to a
        #
        #   @param n_epochs [Symbol, :auto, Integer] The number of epochs to train the model for. An epoch refers to one full cycle t

        # Number of examples in each batch. A larger batch size means that model
        # parameters are updated less frequently, but with lower variance.
        #
        # @see OpenAI::Models::FineTuning::SupervisedHyperparameters#batch_size
        module BatchSize
          extend OpenAI::Internal::Type::Union

          variant const: :auto

          variant Integer

          # @!method self.variants
          #   @return [Array(Symbol, :auto, Integer)]
        end

        # Scaling factor for the learning rate. A smaller learning rate may be useful to
        # avoid overfitting.
        #
        # @see OpenAI::Models::FineTuning::SupervisedHyperparameters#learning_rate_multiplier
        module LearningRateMultiplier
          extend OpenAI::Internal::Type::Union

          variant const: :auto

          variant Float

          # @!method self.variants
          #   @return [Array(Symbol, :auto, Float)]
        end

        # The number of epochs to train the model for. An epoch refers to one full cycle
        # through the training dataset.
        #
        # @see OpenAI::Models::FineTuning::SupervisedHyperparameters#n_epochs
        module NEpochs
          extend OpenAI::Internal::Type::Union

          variant const: :auto

          variant Integer

          # @!method self.variants
          #   @return [Array(Symbol, :auto, Integer)]
        end
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module FineTuning
      # Configuration for the supervised fine-tuning method.
      class SupervisedMethod < OpenAI::Internal::Type::BaseModel
        # @!attribute hyperparameters
        #   The hyperparameters used for the fine-tuning job.
        #
        #   @return [OpenAI::Models::FineTuning::SupervisedHyperparameters, nil]
        optional :hyperparameters, -> { OpenAI::FineTuning::SupervisedHyperparameters }

        # @!method initialize(hyperparameters: nil)
        #   Configuration for the supervised fine-tuning method.
        #
        #   @param hyperparameters [OpenAI::Models::FineTuning::SupervisedHyperparameters] The hyperparameters used for the fine-tuning job.
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    # Definition of a callable function exposed to the model.
    class FunctionDefinition < OpenAI::Internal::Type::BaseModel
      # @!attribute name
      #   The name of the function to be called. Must be a-z, A-Z, 0-9, or contain
      #   underscores and dashes, with a maximum length of 64.
      #
      #   @return [String]
      required :name, String

      # @!attribute description
      #   A description of what the function does, used by the model to choose when and
      #   how to call the function.
      #
      #   @return [String, nil]
      optional :description, String

      # @!attribute parameters
      #   The parameters the functions accepts, described as a JSON Schema object. See the
      #   [guide](https://platform.openai.com/docs/guides/function-calling) for examples,
      #   and the
      #   [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for
      #   documentation about the format.
      #
      #   Omitting `parameters` defines a function with an empty parameter list.
      #
      #   @return [Hash{Symbol=>Object}, nil]
      optional :parameters, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]

      # @!attribute strict
      #   Whether to enable strict schema adherence when generating the function call. If
      #   set to true, the model will follow the exact schema defined in the `parameters`
      #   field. Only a subset of JSON Schema is supported when `strict` is `true`. Learn
      #   more about Structured Outputs in the
      #   [function calling guide](https://platform.openai.com/docs/guides/function-calling).
      #
      #   @return [Boolean, nil]
      optional :strict, OpenAI::Internal::Type::Boolean, nil?: true

      # @!method initialize(name:, description: nil, parameters: nil, strict: nil)
      #   Some parameter documentations has been truncated, see
      #   {OpenAI::Models::FunctionDefinition} for more details.
      #
      #   @param name [String] The name of the function to be called. Must be a-z, A-Z, 0-9, or contain undersc
      #
      #   @param description [String] A description of what the function does, used by the model to choose when and ho
      #
      #   @param parameters [Hash{Symbol=>Object}] The parameters the functions accepts, described as a JSON Schema object. See the
      #
      #   @param strict [Boolean, nil] Whether to enable strict schema adherence when generating the function call. If
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    # Free-form JSON-Schema-style parameters hash for a function definition.
    # @type [OpenAI::Internal::Type::Converter]
    FunctionParameters = OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Graders
      # A LabelModelGrader object which uses a model to assign labels to each item in
      # the evaluation.
      class LabelModelGrader < OpenAI::Internal::Type::BaseModel
        # @!attribute input
        #
        #   @return [Array<OpenAI::Models::Graders::LabelModelGrader::Input>]
        required :input, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Graders::LabelModelGrader::Input] }

        # @!attribute labels
        #   The labels to assign to each item in the evaluation.
        #
        #   @return [Array<String>]
        required :labels, OpenAI::Internal::Type::ArrayOf[String]

        # @!attribute model
        #   The model to use for the evaluation. Must support structured outputs.
        #
        #   @return [String]
        required :model, String

        # @!attribute name
        #   The name of the grader.
        #
        #   @return [String]
        required :name, String

        # @!attribute passing_labels
        #   The labels that indicate a passing result. Must be a subset of labels.
        #
        #   @return [Array<String>]
        required :passing_labels, OpenAI::Internal::Type::ArrayOf[String]

        # @!attribute type
        #   The object type, which is always `label_model`.
        #
        #   @return [Symbol, :label_model]
        required :type, const: :label_model

        # @!method initialize(input:, labels:, model:, name:, passing_labels:, type: :label_model)
        #   A LabelModelGrader object which uses a model to assign labels to each item in
        #   the evaluation.
        #
        #   @param input [Array<OpenAI::Models::Graders::LabelModelGrader::Input>]
        #
        #   @param labels [Array<String>] The labels to assign to each item in the evaluation.
        #
        #   @param model [String] The model to use for the evaluation. Must support structured outputs.
        #
        #   @param name [String] The name of the grader.
        #
        #   @param passing_labels [Array<String>] The labels that indicate a passing result. Must be a subset of labels.
        #
        #   @param type [Symbol, :label_model] The object type, which is always `label_model`.

        class Input < OpenAI::Internal::Type::BaseModel
          # @!attribute content
          #   Inputs to the model - can contain template strings.
          #
          #   @return [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::LabelModelGrader::Input::Content::OutputText, OpenAI::Models::Graders::LabelModelGrader::Input::Content::InputImage, Array<Object>]
          required :content, union: -> { OpenAI::Graders::LabelModelGrader::Input::Content }

          # @!attribute role
          #   The role of the message input. One of `user`, `assistant`, `system`, or
          #   `developer`.
          #
          #   @return [Symbol, OpenAI::Models::Graders::LabelModelGrader::Input::Role]
          required :role, enum: -> { OpenAI::Graders::LabelModelGrader::Input::Role }

          # @!attribute type
          #   The type of the message input. Always `message`.
          #
          #   @return [Symbol, OpenAI::Models::Graders::LabelModelGrader::Input::Type, nil]
          optional :type, enum: -> { OpenAI::Graders::LabelModelGrader::Input::Type }

          # @!method initialize(content:, role:, type: nil)
          #   Some parameter documentations has been truncated, see
          #   {OpenAI::Models::Graders::LabelModelGrader::Input} for more details.
          #
          #   A message input to the model with a role indicating instruction following
          #   hierarchy. Instructions given with the `developer` or `system` role take
          #   precedence over instructions given with the `user` role. Messages with the
          #   `assistant` role are presumed to have been generated by the model in previous
          #   interactions.
          #
          #   @param content [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::LabelModelGrader::Input::Content::OutputText, OpenAI::Models::Graders::LabelModelGrader::Input::Content::InputImage, Array<Object>] Inputs to the model - can contain template strings.
          #
          #   @param role [Symbol, OpenAI::Models::Graders::LabelModelGrader::Input::Role] The role of the message input. One of `user`, `assistant`, `system`, or
          #
          #   @param type [Symbol, OpenAI::Models::Graders::LabelModelGrader::Input::Type] The type of the message input. Always `message`.

          # Inputs to the model - can contain template strings.
          #
          # @see OpenAI::Models::Graders::LabelModelGrader::Input#content
          module Content
            extend OpenAI::Internal::Type::Union

            # A text input to the model.
            variant String

            # A text input to the model.
            variant -> { OpenAI::Responses::ResponseInputText }

            # A text output from the model.
            variant -> { OpenAI::Graders::LabelModelGrader::Input::Content::OutputText }

            # An image input to the model.
            variant -> { OpenAI::Graders::LabelModelGrader::Input::Content::InputImage }

            # A list of inputs, each of which may be either an input text or input image object.
            variant -> { OpenAI::Models::Graders::LabelModelGrader::Input::Content::AnArrayOfInputTextAndInputImageArray }

            class OutputText < OpenAI::Internal::Type::BaseModel
              # @!attribute text
              #   The text output from the model.
              #
              #   @return [String]
              required :text, String

              # @!attribute type
              #   The type of the output text. Always `output_text`.
              #
              #   @return [Symbol, :output_text]
              required :type, const: :output_text

              # @!method initialize(text:, type: :output_text)
              #   Some parameter documentations has been truncated, see
              #   {OpenAI::Models::Graders::LabelModelGrader::Input::Content::OutputText} for more
              #   details.
              #
              #   A text output from the model.
              #
              #   @param text [String] The text output from the model.
              #
              #   @param type [Symbol, :output_text] The type of the output text. Always `output_text`.
            end

            class InputImage < OpenAI::Internal::Type::BaseModel
              # @!attribute image_url
              #   The URL of the image input.
              #
              #   @return [String]
              required :image_url, String

              # @!attribute type
              #   The type of the image input. Always `input_image`.
              #
              #   @return [Symbol, :input_image]
              required :type, const: :input_image

              # @!attribute detail
              #   The detail level of the image to be sent to the model. One of `high`, `low`, or
              #   `auto`. Defaults to `auto`.
              #
              #   @return [String, nil]
              optional :detail, String

              # @!method initialize(image_url:, detail: nil, type: :input_image)
              #   Some parameter documentations has been truncated, see
              #   {OpenAI::Models::Graders::LabelModelGrader::Input::Content::InputImage} for more
              #   details.
              #
              #   An image input to the model.
              #
              #   @param image_url [String] The URL of the image input.
              #
              #   @param detail [String] The detail level of the image to be sent to the model. One of `high`, `low`, or
              #
              #   @param type [Symbol, :input_image] The type of the image input. Always `input_image`.
            end

            # @!method self.variants
            #   @return [Array(String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::LabelModelGrader::Input::Content::OutputText, OpenAI::Models::Graders::LabelModelGrader::Input::Content::InputImage, Array<Object>)]

            # @type [OpenAI::Internal::Type::Converter]
            AnArrayOfInputTextAndInputImageArray = OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::Unknown]
          end

          # The role of the message input. One of `user`, `assistant`, `system`, or
          # `developer`.
          #
          # @see OpenAI::Models::Graders::LabelModelGrader::Input#role
          module Role
            extend OpenAI::Internal::Type::Enum

            USER = :user
            ASSISTANT = :assistant
            SYSTEM = :system
            DEVELOPER = :developer

            # @!method self.values
            #   @return [Array<Symbol>]
          end

          # The type of the message input. Always `message`.
          #
          # @see OpenAI::Models::Graders::LabelModelGrader::Input#type
          module Type
            extend OpenAI::Internal::Type::Enum

            MESSAGE = :message

            # @!method self.values
            #   @return [Array<Symbol>]
          end
        end
      end
    end

    LabelModelGrader = Graders::LabelModelGrader
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Graders
      # A MultiGrader object combines the output of multiple graders to produce a
      # single score.
      class MultiGrader < OpenAI::Internal::Type::BaseModel
        # @!attribute calculate_output
        #   A formula to calculate the output based on grader results.
        #
        #   @return [String]
        required :calculate_output, String

        # @!attribute graders
        #   A StringCheckGrader object that performs a string comparison between input and
        #   reference using a specified operation.
        #
        #   @return [OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::LabelModelGrader]
        required :graders, union: -> { OpenAI::Graders::MultiGrader::Graders }

        # @!attribute name
        #   The name of the grader.
        #
        #   @return [String]
        required :name, String

        # @!attribute type
        #   The object type, which is always `multi`.
        #
        #   @return [Symbol, :multi]
        required :type, const: :multi

        # @!method initialize(calculate_output:, graders:, name:, type: :multi)
        #   Some parameter documentations has been truncated, see
        #   {OpenAI::Models::Graders::MultiGrader} for more details.
        #
        #   A MultiGrader object combines the output of multiple graders to produce a single
        #   score.
        #
        #   @param calculate_output [String] A formula to calculate the output based on grader results.
        #
        #   @param graders [OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::LabelModelGrader] A StringCheckGrader object that performs a string comparison between input and r
        #
        #   @param name [String] The name of the grader.
        #
        #   @param type [Symbol, :multi] The object type, which is always `multi`.

        # A StringCheckGrader object that performs a string comparison between input and
        # reference using a specified operation.
        #
        # @see OpenAI::Models::Graders::MultiGrader#graders
        module Graders
          extend OpenAI::Internal::Type::Union

          # A StringCheckGrader object that performs a string comparison between input and reference using a specified operation.
          variant -> { OpenAI::Graders::StringCheckGrader }

          # A TextSimilarityGrader object which grades text based on similarity metrics.
          variant -> { OpenAI::Graders::TextSimilarityGrader }

          # A PythonGrader object that runs a python script on the input.
          variant -> { OpenAI::Graders::PythonGrader }

          # A ScoreModelGrader object that uses a model to assign a score to the input.
          variant -> { OpenAI::Graders::ScoreModelGrader }

          # A LabelModelGrader object which uses a model to assign labels to each item
          # in the evaluation.
          variant -> { OpenAI::Graders::LabelModelGrader }

          # @!method self.variants
          #   @return [Array(OpenAI::Models::Graders::StringCheckGrader, OpenAI::Models::Graders::TextSimilarityGrader, OpenAI::Models::Graders::PythonGrader, OpenAI::Models::Graders::ScoreModelGrader, OpenAI::Models::Graders::LabelModelGrader)]
        end
      end
    end

    MultiGrader = Graders::MultiGrader
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Graders
      # A PythonGrader object that runs a python script on the input.
      class PythonGrader < OpenAI::Internal::Type::BaseModel
        # @!attribute name
        #   The name of the grader.
        #
        #   @return [String]
        required :name, String

        # @!attribute source
        #   The source code of the python script.
        #
        #   @return [String]
        required :source, String

        # @!attribute type
        #   The object type, which is always `python`.
        #
        #   @return [Symbol, :python]
        required :type, const: :python

        # @!attribute image_tag
        #   The image tag to use for the python script.
        #
        #   @return [String, nil]
        optional :image_tag, String

        # @!method initialize(name:, source:, image_tag: nil, type: :python)
        #   A PythonGrader object that runs a python script on the input.
        #
        #   @param name [String] The name of the grader.
        #
        #   @param source [String] The source code of the python script.
        #
        #   @param image_tag [String] The image tag to use for the python script.
        #
        #   @param type [Symbol, :python] The object type, which is always `python`.
      end
    end

    PythonGrader = Graders::PythonGrader
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Graders
      # A ScoreModelGrader object that uses a model to assign a score to the input.
      class ScoreModelGrader < OpenAI::Internal::Type::BaseModel
        # @!attribute input
        #   The input text. This may include template strings.
        #
        #   @return [Array<OpenAI::Models::Graders::ScoreModelGrader::Input>]
        required :input, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Graders::ScoreModelGrader::Input] }

        # @!attribute model
        #   The model to use for the evaluation.
        #
        #   @return [String]
        required :model, String

        # @!attribute name
        #   The name of the grader.
        #
        #   @return [String]
        required :name, String

        # @!attribute type
        #   The object type, which is always `score_model`.
        #
        #   @return [Symbol, :score_model]
        required :type, const: :score_model

        # @!attribute range
        #   The range of the score. Defaults to `[0, 1]`.
        #
        #   @return [Array<Float>, nil]
        optional :range, OpenAI::Internal::Type::ArrayOf[Float]

        # @!attribute sampling_params
        #   The sampling parameters for the model.
        #
        #   @return [Object, nil]
        optional :sampling_params, OpenAI::Internal::Type::Unknown

        # @!method initialize(input:, model:, name:, range: nil, sampling_params: nil, type: :score_model)
        #   A ScoreModelGrader object that uses a model to assign a score to the input.
        #
        #   @param input [Array<OpenAI::Models::Graders::ScoreModelGrader::Input>] The input text. This may include template strings.
        #
        #   @param model [String] The model to use for the evaluation.
        #
        #   @param name [String] The name of the grader.
        #
        #   @param range [Array<Float>] The range of the score. Defaults to `[0, 1]`.
        #
        #   @param sampling_params [Object] The sampling parameters for the model.
        #
        #   @param type [Symbol, :score_model] The object type, which is always `score_model`.

        class Input < OpenAI::Internal::Type::BaseModel
          # @!attribute content
          #   Inputs to the model - can contain template strings.
          #
          #   @return [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::ScoreModelGrader::Input::Content::OutputText, OpenAI::Models::Graders::ScoreModelGrader::Input::Content::InputImage, Array<Object>]
          required :content, union: -> { OpenAI::Graders::ScoreModelGrader::Input::Content }

          # @!attribute role
          #   The role of the message input. One of `user`, `assistant`, `system`, or
          #   `developer`.
          #
          #   @return [Symbol, OpenAI::Models::Graders::ScoreModelGrader::Input::Role]
          required :role, enum: -> { OpenAI::Graders::ScoreModelGrader::Input::Role }

          # @!attribute type
          #   The type of the message input. Always `message`.
          #
          #   @return [Symbol, OpenAI::Models::Graders::ScoreModelGrader::Input::Type, nil]
          optional :type, enum: -> { OpenAI::Graders::ScoreModelGrader::Input::Type }

          # @!method initialize(content:, role:, type: nil)
          #   Some parameter documentations has been truncated, see
          #   {OpenAI::Models::Graders::ScoreModelGrader::Input} for more details.
          #
          #   A message input to the model with a role indicating instruction following
          #   hierarchy. Instructions given with the `developer` or `system` role take
          #   precedence over instructions given with the `user` role. Messages with the
          #   `assistant` role are presumed to have been generated by the model in previous
          #   interactions.
          #
          #   @param content [String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::ScoreModelGrader::Input::Content::OutputText, OpenAI::Models::Graders::ScoreModelGrader::Input::Content::InputImage, Array<Object>] Inputs to the model - can contain template strings.
          #
          #   @param role [Symbol, OpenAI::Models::Graders::ScoreModelGrader::Input::Role] The role of the message input. One of `user`, `assistant`, `system`, or
          #
          #   @param type [Symbol, OpenAI::Models::Graders::ScoreModelGrader::Input::Type] The type of the message input. Always `message`.

          # Inputs to the model - can contain template strings.
          #
          # @see OpenAI::Models::Graders::ScoreModelGrader::Input#content
          module Content
            extend OpenAI::Internal::Type::Union

            # A text input to the model.
            variant String

            # A text input to the model.
            variant -> { OpenAI::Responses::ResponseInputText }

            # A text output from the model.
            variant -> { OpenAI::Graders::ScoreModelGrader::Input::Content::OutputText }

            # An image input to the model.
            variant -> { OpenAI::Graders::ScoreModelGrader::Input::Content::InputImage }

            # A list of inputs, each of which may be either an input text or input image object.
            variant -> { OpenAI::Models::Graders::ScoreModelGrader::Input::Content::AnArrayOfInputTextAndInputImageArray }

            class OutputText < OpenAI::Internal::Type::BaseModel
              # @!attribute text
              #   The text output from the model.
              #
              #   @return [String]
              required :text, String

              # @!attribute type
              #   The type of the output text. Always `output_text`.
              #
              #   @return [Symbol, :output_text]
              required :type, const: :output_text

              # @!method initialize(text:, type: :output_text)
              #   Some parameter documentations has been truncated, see
              #   {OpenAI::Models::Graders::ScoreModelGrader::Input::Content::OutputText} for more
              #   details.
              #
              #   A text output from the model.
              #
              #   @param text [String] The text output from the model.
              #
              #   @param type [Symbol, :output_text] The type of the output text. Always `output_text`.
            end

            class InputImage < OpenAI::Internal::Type::BaseModel
              # @!attribute image_url
              #   The URL of the image input.
              #
              #   @return [String]
              required :image_url, String

              # @!attribute type
              #   The type of the image input. Always `input_image`.
              #
              #   @return [Symbol, :input_image]
              required :type, const: :input_image

              # @!attribute detail
              #   The detail level of the image to be sent to the model. One of `high`, `low`, or
              #   `auto`. Defaults to `auto`.
              #
              #   @return [String, nil]
              optional :detail, String

              # @!method initialize(image_url:, detail: nil, type: :input_image)
              #   Some parameter documentations has been truncated, see
              #   {OpenAI::Models::Graders::ScoreModelGrader::Input::Content::InputImage} for more
              #   details.
              #
              #   An image input to the model.
              #
              #   @param image_url [String] The URL of the image input.
              #
              #   @param detail [String] The detail level of the image to be sent to the model. One of `high`, `low`, or
              #
              #   @param type [Symbol, :input_image] The type of the image input. Always `input_image`.
            end

            # @!method self.variants
            #   @return [Array(String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Graders::ScoreModelGrader::Input::Content::OutputText, OpenAI::Models::Graders::ScoreModelGrader::Input::Content::InputImage, Array<Object>)]

            # @type [OpenAI::Internal::Type::Converter]
            AnArrayOfInputTextAndInputImageArray = OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::Unknown]
          end

          # The role of the message input. One of `user`, `assistant`, `system`, or
          # `developer`.
          #
          # @see OpenAI::Models::Graders::ScoreModelGrader::Input#role
          module Role
            extend OpenAI::Internal::Type::Enum

            USER = :user
            ASSISTANT = :assistant
            SYSTEM = :system
            DEVELOPER = :developer

            # @!method self.values
            #   @return [Array<Symbol>]
          end

          # The type of the message input. Always `message`.
          #
          # @see OpenAI::Models::Graders::ScoreModelGrader::Input#type
          module Type
            extend OpenAI::Internal::Type::Enum

            MESSAGE = :message

            # @!method self.values
            #   @return [Array<Symbol>]
          end
        end
      end
    end

    ScoreModelGrader = Graders::ScoreModelGrader
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Graders
      # A StringCheckGrader object that performs a string comparison between input and
      # reference using a specified operation.
      class StringCheckGrader < OpenAI::Internal::Type::BaseModel
        # @!attribute input
        #   The input text. This may include template strings.
        #
        #   @return [String]
        required :input, String

        # @!attribute name
        #   The name of the grader.
        #
        #   @return [String]
        required :name, String

        # @!attribute operation
        #   The string check operation to perform. One of `eq`, `ne`, `like`, or `ilike`.
        #
        #   @return [Symbol, OpenAI::Models::Graders::StringCheckGrader::Operation]
        required :operation, enum: -> { OpenAI::Graders::StringCheckGrader::Operation }

        # @!attribute reference
        #   The reference text. This may include template strings.
        #
        #   @return [String]
        required :reference, String

        # @!attribute type
        #   The object type, which is always `string_check`.
        #
        #   @return [Symbol, :string_check]
        required :type, const: :string_check

        # @!method initialize(input:, name:, operation:, reference:, type: :string_check)
        #   A StringCheckGrader object that performs a string comparison between input and
        #   reference using a specified operation.
        #
        #   @param input [String] The input text. This may include template strings.
        #
        #   @param name [String] The name of the grader.
        #
        #   @param operation [Symbol, OpenAI::Models::Graders::StringCheckGrader::Operation] The string check operation to perform. One of `eq`, `ne`, `like`, or `ilike`.
        #
        #   @param reference [String] The reference text. This may include template strings.
        #
        #   @param type [Symbol, :string_check] The object type, which is always `string_check`.

        # The string check operation to perform. One of `eq`, `ne`, `like`, or `ilike`.
        #
        # @see OpenAI::Models::Graders::StringCheckGrader#operation
        module Operation
          extend OpenAI::Internal::Type::Enum

          EQ = :eq
          NE = :ne
          LIKE = :like
          ILIKE = :ilike

          # @!method self.values
          #   @return [Array<Symbol>]
        end
      end
    end

    StringCheckGrader = Graders::StringCheckGrader
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Graders
      class TextSimilarityGrader < OpenAI::Internal::Type::BaseModel
        # @!attribute evaluation_metric
        #   The evaluation metric to use. One of `fuzzy_match`, `bleu`, `gleu`, `meteor`,
        #   `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`.
        #
        #   @return [Symbol, OpenAI::Models::Graders::TextSimilarityGrader::EvaluationMetric]
        required :evaluation_metric, enum: -> { OpenAI::Graders::TextSimilarityGrader::EvaluationMetric }

        # @!attribute input
        #   The text being graded.
        #
        #   @return [String]
        required :input, String

        # @!attribute name
        #   The name of the grader.
        #
        #   @return [String]
        required :name, String

        # @!attribute reference
        #   The text being graded against.
        #
        #   @return [String]
        required :reference, String

        # @!attribute type
        #   The type of grader.
        #
        #   @return [Symbol, :text_similarity]
        required :type, const: :text_similarity

        # @!method initialize(evaluation_metric:, input:, name:, reference:, type: :text_similarity)
        #   Some parameter documentation has been truncated, see
        #   {OpenAI::Models::Graders::TextSimilarityGrader} for more details.
        #
        #   A TextSimilarityGrader object which grades text based on similarity metrics.
        #
        #   @param evaluation_metric [Symbol, OpenAI::Models::Graders::TextSimilarityGrader::EvaluationMetric] The evaluation metric to use. One of `fuzzy_match`, `bleu`, `gleu`, `meteor`, `r
        #
        #   @param input [String] The text being graded.
        #
        #   @param name [String] The name of the grader.
        #
        #   @param reference [String] The text being graded against.
        #
        #   @param type [Symbol, :text_similarity] The type of grader.

        # The evaluation metric to use. One of `fuzzy_match`, `bleu`, `gleu`, `meteor`,
        # `rouge_1`, `rouge_2`, `rouge_3`, `rouge_4`, `rouge_5`, or `rouge_l`.
        #
        # @see OpenAI::Models::Graders::TextSimilarityGrader#evaluation_metric
        module EvaluationMetric
          extend OpenAI::Internal::Type::Enum

          FUZZY_MATCH = :fuzzy_match
          BLEU = :bleu
          GLEU = :gleu
          METEOR = :meteor
          ROUGE_1 = :rouge_1
          ROUGE_2 = :rouge_2
          ROUGE_3 = :rouge_3
          ROUGE_4 = :rouge_4
          ROUGE_5 = :rouge_5
          ROUGE_L = :rouge_l

          # @!method self.values
          #   @return [Array<Symbol>]
        end
      end
    end

    # Top-level alias so the grader can be referenced without the `Graders` namespace.
    TextSimilarityGrader = Graders::TextSimilarityGrader
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    class Image < OpenAI::Internal::Type::BaseModel
      # @!attribute b64_json
      #   The base64-encoded JSON of the generated image. Default value for `gpt-image-1`,
      #   and only present if `response_format` is set to `b64_json` for `dall-e-2` and
      #   `dall-e-3`.
      #
      #   @return [String, nil]
      optional :b64_json, String

      # @!attribute revised_prompt
      #   For `dall-e-3` only, the revised prompt that was used to generate the image.
      #
      #   @return [String, nil]
      optional :revised_prompt, String

      # @!attribute url
      #   When using `dall-e-2` or `dall-e-3`, the URL of the generated image if
      #   `response_format` is set to `url` (default value). Unsupported for
      #   `gpt-image-1`.
      #
      #   @return [String, nil]
      optional :url, String

      # @!method initialize(b64_json: nil, revised_prompt: nil, url: nil)
      #   Some parameter documentation has been truncated, see {OpenAI::Models::Image}
      #   for more details.
      #
      #   Represents the content or the URL of an image generated by the OpenAI API.
      #
      #   @param b64_json [String] The base64-encoded JSON of the generated image. Default value for `gpt-image-1`,
      #
      #   @param revised_prompt [String] For `dall-e-3` only, the revised prompt that was used to generate the image.
      #
      #   @param url [String] When using `dall-e-2` or `dall-e-3`, the URL of the generated image if `response
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    # @see OpenAI::Resources::Images#create_variation
    class ImageCreateVariationParams < OpenAI::Internal::Type::BaseModel
      extend OpenAI::Internal::Type::RequestParameters::Converter
      include OpenAI::Internal::Type::RequestParameters

      # @!attribute image
      #   The image to use as the basis for the variation(s). Must be a valid PNG file,
      #   less than 4MB, and square.
      #
      #   @return [Pathname, StringIO, IO, String, OpenAI::FilePart]
      required :image, OpenAI::Internal::Type::FileInput

      # @!attribute model
      #   The model to use for image generation. Only `dall-e-2` is supported at this
      #   time.
      #
      #   @return [String, Symbol, OpenAI::Models::ImageModel, nil]
      optional :model, union: -> { OpenAI::ImageCreateVariationParams::Model }, nil?: true

      # @!attribute n
      #   The number of images to generate. Must be between 1 and 10.
      #
      #   @return [Integer, nil]
      optional :n, Integer, nil?: true

      # @!attribute response_format
      #   The format in which the generated images are returned. Must be one of `url` or
      #   `b64_json`. URLs are only valid for 60 minutes after the image has been
      #   generated.
      #
      #   @return [Symbol, OpenAI::Models::ImageCreateVariationParams::ResponseFormat, nil]
      optional :response_format, enum: -> { OpenAI::ImageCreateVariationParams::ResponseFormat }, nil?: true

      # @!attribute size
      #   The size of the generated images. Must be one of `256x256`, `512x512`, or
      #   `1024x1024`.
      #
      #   @return [Symbol, OpenAI::Models::ImageCreateVariationParams::Size, nil]
      optional :size, enum: -> { OpenAI::ImageCreateVariationParams::Size }, nil?: true

      # @!attribute user
      #   A unique identifier representing your end-user, which can help OpenAI to monitor
      #   and detect abuse.
      #   [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
      #
      #   @return [String, nil]
      optional :user, String

      # @!method initialize(image:, model: nil, n: nil, response_format: nil, size: nil, user: nil, request_options: {})
      #   Some parameter documentation has been truncated, see
      #   {OpenAI::Models::ImageCreateVariationParams} for more details.
      #
      #   @param image [Pathname, StringIO, IO, String, OpenAI::FilePart] The image to use as the basis for the variation(s). Must be a valid PNG file, le
      #
      #   @param model [String, Symbol, OpenAI::Models::ImageModel, nil] The model to use for image generation. Only `dall-e-2` is supported at this time
      #
      #   @param n [Integer, nil] The number of images to generate. Must be between 1 and 10.
      #
      #   @param response_format [Symbol, OpenAI::Models::ImageCreateVariationParams::ResponseFormat, nil] The format in which the generated images are returned. Must be one of `url` or `
      #
      #   @param size [Symbol, OpenAI::Models::ImageCreateVariationParams::Size, nil] The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x
      #
      #   @param user [String] A unique identifier representing your end-user, which can help OpenAI to monitor
      #
      #   @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]

      # The model to use for image generation. Only `dall-e-2` is supported at this
      # time.
      module Model
        extend OpenAI::Internal::Type::Union

        variant String

        # The model to use for image generation. Only `dall-e-2` is supported at this time.
        variant enum: -> { OpenAI::ImageModel }

        # @!method self.variants
        #   @return [Array(String, Symbol, OpenAI::Models::ImageModel)]
      end

      # The format in which the generated images are returned. Must be one of `url` or
      # `b64_json`. URLs are only valid for 60 minutes after the image has been
      # generated.
      module ResponseFormat
        extend OpenAI::Internal::Type::Enum

        URL = :url
        B64_JSON = :b64_json

        # @!method self.values
        #   @return [Array<Symbol>]
      end

      # The size of the generated images. Must be one of `256x256`, `512x512`, or
      # `1024x1024`.
      module Size
        extend OpenAI::Internal::Type::Enum

        SIZE_256X256 = :"256x256"
        SIZE_512X512 = :"512x512"
        SIZE_1024X1024 = :"1024x1024"

        # @!method self.values
        #   @return [Array<Symbol>]
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    class ImageEditCompletedEvent < OpenAI::Internal::Type::BaseModel
      # @!attribute b64_json
      #   Base64-encoded final edited image data, suitable for rendering as an image.
      #
      #   @return [String]
      required :b64_json, String

      # @!attribute background
      #   The background setting for the edited image.
      #
      #   @return [Symbol, OpenAI::Models::ImageEditCompletedEvent::Background]
      required :background, enum: -> { OpenAI::ImageEditCompletedEvent::Background }

      # @!attribute created_at
      #   The Unix timestamp when the event was created.
      #
      #   @return [Integer]
      required :created_at, Integer

      # @!attribute output_format
      #   The output format for the edited image.
      #
      #   @return [Symbol, OpenAI::Models::ImageEditCompletedEvent::OutputFormat]
      required :output_format, enum: -> { OpenAI::ImageEditCompletedEvent::OutputFormat }

      # @!attribute quality
      #   The quality setting for the edited image.
      #
      #   @return [Symbol, OpenAI::Models::ImageEditCompletedEvent::Quality]
      required :quality, enum: -> { OpenAI::ImageEditCompletedEvent::Quality }

      # @!attribute size
      #   The size of the edited image.
      #
      #   @return [Symbol, OpenAI::Models::ImageEditCompletedEvent::Size]
      required :size, enum: -> { OpenAI::ImageEditCompletedEvent::Size }

      # @!attribute type
      #   The type of the event. Always `image_edit.completed`.
      #
      #   @return [Symbol, :"image_edit.completed"]
      required :type, const: :"image_edit.completed"

      # @!attribute usage
      #   For `gpt-image-1` only, the token usage information for the image generation.
      #
      #   @return [OpenAI::Models::ImageEditCompletedEvent::Usage]
      required :usage, -> { OpenAI::ImageEditCompletedEvent::Usage }

      # @!method initialize(b64_json:, background:, created_at:, output_format:, quality:, size:, usage:, type: :"image_edit.completed")
      #   Some parameter documentation has been truncated, see
      #   {OpenAI::Models::ImageEditCompletedEvent} for more details.
      #
      #   Emitted when image editing has completed and the final image is available.
      #
      #   @param b64_json [String] Base64-encoded final edited image data, suitable for rendering as an image.
      #
      #   @param background [Symbol, OpenAI::Models::ImageEditCompletedEvent::Background] The background setting for the edited image.
      #
      #   @param created_at [Integer] The Unix timestamp when the event was created.
      #
      #   @param output_format [Symbol, OpenAI::Models::ImageEditCompletedEvent::OutputFormat] The output format for the edited image.
      #
      #   @param quality [Symbol, OpenAI::Models::ImageEditCompletedEvent::Quality] The quality setting for the edited image.
      #
      #   @param size [Symbol, OpenAI::Models::ImageEditCompletedEvent::Size] The size of the edited image.
      #
      #   @param usage [OpenAI::Models::ImageEditCompletedEvent::Usage] For `gpt-image-1` only, the token usage information for the image generation.
      #
      #   @param type [Symbol, :"image_edit.completed"] The type of the event. Always `image_edit.completed`.

      # The background setting for the edited image.
      #
      # @see OpenAI::Models::ImageEditCompletedEvent#background
      module Background
        extend OpenAI::Internal::Type::Enum

        TRANSPARENT = :transparent
        OPAQUE = :opaque
        AUTO = :auto

        # @!method self.values
        #   @return [Array<Symbol>]
      end

      # The output format for the edited image.
      #
      # @see OpenAI::Models::ImageEditCompletedEvent#output_format
      module OutputFormat
        extend OpenAI::Internal::Type::Enum

        PNG = :png
        WEBP = :webp
        JPEG = :jpeg

        # @!method self.values
        #   @return [Array<Symbol>]
      end

      # The quality setting for the edited image.
      #
      # @see OpenAI::Models::ImageEditCompletedEvent#quality
      module Quality
        extend OpenAI::Internal::Type::Enum

        LOW = :low
        MEDIUM = :medium
        HIGH = :high
        AUTO = :auto

        # @!method self.values
        #   @return [Array<Symbol>]
      end

      # The size of the edited image.
      #
      # @see OpenAI::Models::ImageEditCompletedEvent#size
      module Size
        extend OpenAI::Internal::Type::Enum

        SIZE_1024X1024 = :"1024x1024"
        SIZE_1024X1536 = :"1024x1536"
        SIZE_1536X1024 = :"1536x1024"
        AUTO = :auto

        # @!method self.values
        #   @return [Array<Symbol>]
      end

      # @see OpenAI::Models::ImageEditCompletedEvent#usage
      class Usage < OpenAI::Internal::Type::BaseModel
        # @!attribute input_tokens
        #   The number of tokens (images and text) in the input prompt.
        #
        #   @return [Integer]
        required :input_tokens, Integer

        # @!attribute input_tokens_details
        #   The input tokens detailed information for the image generation.
        #
        #   @return [OpenAI::Models::ImageEditCompletedEvent::Usage::InputTokensDetails]
        required :input_tokens_details, -> { OpenAI::ImageEditCompletedEvent::Usage::InputTokensDetails }

        # @!attribute output_tokens
        #   The number of image tokens in the output image.
        #
        #   @return [Integer]
        required :output_tokens, Integer

        # @!attribute total_tokens
        #   The total number of tokens (images and text) used for the image generation.
        #
        #   @return [Integer]
        required :total_tokens, Integer

        # @!method initialize(input_tokens:, input_tokens_details:, output_tokens:, total_tokens:)
        #   Some parameter documentation has been truncated, see
        #   {OpenAI::Models::ImageEditCompletedEvent::Usage} for more details.
        #
        #   For `gpt-image-1` only, the token usage information for the image generation.
        #
        #   @param input_tokens [Integer] The number of tokens (images and text) in the input prompt.
        #
        #   @param input_tokens_details [OpenAI::Models::ImageEditCompletedEvent::Usage::InputTokensDetails] The input tokens detailed information for the image generation.
        #
        #   @param output_tokens [Integer] The number of image tokens in the output image.
        #
        #   @param total_tokens [Integer] The total number of tokens (images and text) used for the image generation.

        # @see OpenAI::Models::ImageEditCompletedEvent::Usage#input_tokens_details
        class InputTokensDetails < OpenAI::Internal::Type::BaseModel
          # @!attribute image_tokens
          #   The number of image tokens in the input prompt.
          #
          #   @return [Integer]
          required :image_tokens, Integer

          # @!attribute text_tokens
          #   The number of text tokens in the input prompt.
          #
          #   @return [Integer]
          required :text_tokens, Integer

          # @!method initialize(image_tokens:, text_tokens:)
          #   The input tokens detailed information for the image generation.
          #
          #   @param image_tokens [Integer] The number of image tokens in the input prompt.
          #
          #   @param text_tokens [Integer] The number of text tokens in the input prompt.
        end
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    # @see OpenAI::Resources::Images#edit
    #
    # @see OpenAI::Resources::Images#edit_stream_raw
    class ImageEditParams < OpenAI::Internal::Type::BaseModel
      extend OpenAI::Internal::Type::RequestParameters::Converter
      include OpenAI::Internal::Type::RequestParameters

      # @!attribute image
      #   The image(s) to edit. Must be a supported image file or an array of images.
      #
      #   For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
      #   50MB. You can provide up to 16 images.
      #
      #   For `dall-e-2`, you can only provide one image, and it should be a square `png`
      #   file less than 4MB.
      #
      #   @return [Pathname, StringIO, IO, String, OpenAI::FilePart, Array<Pathname, StringIO, IO, String, OpenAI::FilePart>]
      required :image, union: -> { OpenAI::ImageEditParams::Image }

      # @!attribute prompt
      #   A text description of the desired image(s). The maximum length is 1000
      #   characters for `dall-e-2`, and 32000 characters for `gpt-image-1`.
      #
      #   @return [String]
      required :prompt, String

      # @!attribute background
      #   Allows to set transparency for the background of the generated image(s). This
      #   parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
      #   `opaque` or `auto` (default value). When `auto` is used, the model will
      #   automatically determine the best background for the image.
      #
      #   If `transparent`, the output format needs to support transparency, so it should
      #   be set to either `png` (default value) or `webp`.
      #
      #   @return [Symbol, OpenAI::Models::ImageEditParams::Background, nil]
      optional :background, enum: -> { OpenAI::ImageEditParams::Background }, nil?: true

      # @!attribute input_fidelity
      #   Control how much effort the model will exert to match the style and features,
      #   especially facial features, of input images. This parameter is only supported
      #   for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
      #
      #   @return [Symbol, OpenAI::Models::ImageEditParams::InputFidelity, nil]
      optional :input_fidelity, enum: -> { OpenAI::ImageEditParams::InputFidelity }, nil?: true

      # @!attribute mask
      #   An additional image whose fully transparent areas (e.g. where alpha is zero)
      #   indicate where `image` should be edited. If there are multiple images provided,
      #   the mask will be applied on the first image. Must be a valid PNG file, less than
      #   4MB, and have the same dimensions as `image`.
      #
      #   @return [Pathname, StringIO, IO, String, OpenAI::FilePart, nil]
      optional :mask, OpenAI::Internal::Type::FileInput

      # @!attribute model
      #   The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
      #   supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
      #   is used.
      #
      #   @return [String, Symbol, OpenAI::Models::ImageModel, nil]
      optional :model, union: -> { OpenAI::ImageEditParams::Model }, nil?: true

      # @!attribute n
      #   The number of images to generate. Must be between 1 and 10.
      #
      #   @return [Integer, nil]
      optional :n, Integer, nil?: true

      # @!attribute output_compression
      #   The compression level (0-100%) for the generated images. This parameter is only
      #   supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
      #   defaults to 100.
      #
      #   @return [Integer, nil]
      optional :output_compression, Integer, nil?: true

      # @!attribute output_format
      #   The format in which the generated images are returned. This parameter is only
      #   supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
      #   default value is `png`.
      #
      #   @return [Symbol, OpenAI::Models::ImageEditParams::OutputFormat, nil]
      optional :output_format, enum: -> { OpenAI::ImageEditParams::OutputFormat }, nil?: true

      # @!attribute partial_images
      #   The number of partial images to generate. This parameter is used for streaming
      #   responses that return partial images. Value must be between 0 and 3. When set to
      #   0, the response will be a single image sent in one streaming event.
      #
      #   Note that the final image may be sent before the full number of partial images
      #   are generated if the full image is generated more quickly.
      #
      #   @return [Integer, nil]
      optional :partial_images, Integer, nil?: true

      # @!attribute quality
      #   The quality of the image that will be generated. `high`, `medium` and `low` are
      #   only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
      #   Defaults to `auto`.
      #
      #   @return [Symbol, OpenAI::Models::ImageEditParams::Quality, nil]
      optional :quality, enum: -> { OpenAI::ImageEditParams::Quality }, nil?: true

      # @!attribute response_format
      #   The format in which the generated images are returned. Must be one of `url` or
      #   `b64_json`. URLs are only valid for 60 minutes after the image has been
      #   generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
      #   will always return base64-encoded images.
      #
      #   @return [Symbol, OpenAI::Models::ImageEditParams::ResponseFormat, nil]
      optional :response_format, enum: -> { OpenAI::ImageEditParams::ResponseFormat }, nil?: true

      # @!attribute size
      #   The size of the generated images. Must be one of `1024x1024`, `1536x1024`
      #   (landscape), `1024x1536` (portrait), or `auto` (default value) for
      #   `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
      #
      #   @return [Symbol, OpenAI::Models::ImageEditParams::Size, nil]
      optional :size, enum: -> { OpenAI::ImageEditParams::Size }, nil?: true

      # @!attribute user
      #   A unique identifier representing your end-user, which can help OpenAI to monitor
      #   and detect abuse.
      #   [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
      #
      #   @return [String, nil]
      optional :user, String

      # @!method initialize(image:, prompt:, background: nil, input_fidelity: nil, mask: nil, model: nil, n: nil, output_compression: nil, output_format: nil, partial_images: nil, quality: nil, response_format: nil, size: nil, user: nil, request_options: {})
      #   Some parameter documentation has been truncated, see
      #   {OpenAI::Models::ImageEditParams} for more details.
      #
      #   @param image [Pathname, StringIO, IO, String, OpenAI::FilePart, Array<Pathname, StringIO, IO, String, OpenAI::FilePart>] The image(s) to edit. Must be a supported image file or an array of images.
      #
      #   @param prompt [String] A text description of the desired image(s). The maximum length is 1000 character
      #
      #   @param background [Symbol, OpenAI::Models::ImageEditParams::Background, nil] Allows to set transparency for the background of the generated image(s).
      #
      #   @param input_fidelity [Symbol, OpenAI::Models::ImageEditParams::InputFidelity, nil] Control how much effort the model will exert to match the style and features,
      #
      #   @param mask [Pathname, StringIO, IO, String, OpenAI::FilePart] An additional image whose fully transparent areas (e.g. where alpha is zero) ind
      #
      #   @param model [String, Symbol, OpenAI::Models::ImageModel, nil] The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are sup
      #
      #   @param n [Integer, nil] The number of images to generate. Must be between 1 and 10.
      #
      #   @param output_compression [Integer, nil] The compression level (0-100%) for the generated images. This parameter
      #
      #   @param output_format [Symbol, OpenAI::Models::ImageEditParams::OutputFormat, nil] The format in which the generated images are returned. This parameter is
      #
      #   @param partial_images [Integer, nil] The number of partial images to generate. This parameter is used for
      #
      #   @param quality [Symbol, OpenAI::Models::ImageEditParams::Quality, nil] The quality of the image that will be generated. `high`, `medium` and `low` are
      #
      #   @param response_format [Symbol, OpenAI::Models::ImageEditParams::ResponseFormat, nil] The format in which the generated images are returned. Must be one of `url` or `
      #
      #   @param size [Symbol, OpenAI::Models::ImageEditParams::Size, nil] The size of the generated images. Must be one of `1024x1024`, `1536x1024` (lands
      #
      #   @param user [String] A unique identifier representing your end-user, which can help OpenAI to monitor
      #
      #   @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]

      # The image(s) to edit. Must be a supported image file or an array of images.
      #
      # For `gpt-image-1`, each image should be a `png`, `webp`, or `jpg` file less than
      # 50MB. You can provide up to 16 images.
      #
      # For `dall-e-2`, you can only provide one image, and it should be a square `png`
      # file less than 4MB.
      module Image
        extend OpenAI::Internal::Type::Union

        variant OpenAI::Internal::Type::FileInput

        variant -> { OpenAI::Models::ImageEditParams::Image::StringArray }

        # @!method self.variants
        #   @return [Array(Pathname, StringIO, IO, String, OpenAI::FilePart, Array<Pathname, StringIO, IO, String, OpenAI::FilePart>)]

        # @type [OpenAI::Internal::Type::Converter]
        StringArray = OpenAI::Internal::Type::ArrayOf[OpenAI::Internal::Type::FileInput]
      end

      # Allows to set transparency for the background of the generated image(s). This
      # parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
      # `opaque` or `auto` (default value). When `auto` is used, the model will
      # automatically determine the best background for the image.
      #
      # If `transparent`, the output format needs to support transparency, so it should
      # be set to either `png` (default value) or `webp`.
      module Background
        extend OpenAI::Internal::Type::Enum

        TRANSPARENT = :transparent
        OPAQUE = :opaque
        AUTO = :auto

        # @!method self.values
        #   @return [Array<Symbol>]
      end

      # Control how much effort the model will exert to match the style and features,
      # especially facial features, of input images. This parameter is only supported
      # for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
      module InputFidelity
        extend OpenAI::Internal::Type::Enum

        HIGH = :high
        LOW = :low

        # @!method self.values
        #   @return [Array<Symbol>]
      end

      # The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are
      # supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1`
      # is used.
      module Model
        extend OpenAI::Internal::Type::Union

        variant String

        # The model to use for image generation. Only `dall-e-2` and `gpt-image-1` are supported. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` is used.
        variant enum: -> { OpenAI::ImageModel }

        # @!method self.variants
        #   @return [Array(String, Symbol, OpenAI::Models::ImageModel)]
      end

      # The format in which the generated images are returned. This parameter is only
      # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`. The
      # default value is `png`.
      module OutputFormat
        extend OpenAI::Internal::Type::Enum

        PNG = :png
        JPEG = :jpeg
        WEBP = :webp

        # @!method self.values
        #   @return [Array<Symbol>]
      end

      # The quality of the image that will be generated. `high`, `medium` and `low` are
      # only supported for `gpt-image-1`. `dall-e-2` only supports `standard` quality.
      # Defaults to `auto`.
      module Quality
        extend OpenAI::Internal::Type::Enum

        STANDARD = :standard
        LOW = :low
        MEDIUM = :medium
        HIGH = :high
        AUTO = :auto

        # @!method self.values
        #   @return [Array<Symbol>]
      end

      # The format in which the generated images are returned. Must be one of `url` or
      # `b64_json`. URLs are only valid for 60 minutes after the image has been
      # generated. This parameter is only supported for `dall-e-2`, as `gpt-image-1`
      # will always return base64-encoded images.
      module ResponseFormat
        extend OpenAI::Internal::Type::Enum

        URL = :url
        B64_JSON = :b64_json

        # @!method self.values
        #   @return [Array<Symbol>]
      end

      # The size of the generated images. Must be one of `1024x1024`, `1536x1024`
      # (landscape), `1024x1536` (portrait), or `auto` (default value) for
      # `gpt-image-1`, and one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`.
      module Size
        extend OpenAI::Internal::Type::Enum

        SIZE_256X256 = :"256x256"
        SIZE_512X512 = :"512x512"
        SIZE_1024X1024 = :"1024x1024"
        SIZE_1536X1024 = :"1536x1024"
        SIZE_1024X1536 = :"1024x1536"
        AUTO = :auto

        # @!method self.values
        #   @return [Array<Symbol>]
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    class ImageEditPartialImageEvent < OpenAI::Internal::Type::BaseModel
      # @!attribute b64_json
      #   Base64-encoded partial image data, suitable for rendering as an image.
      #
      #   @return [String]
      required :b64_json, String

      # @!attribute background
      #   The background setting for the requested edited image.
      #
      #   @return [Symbol, OpenAI::Models::ImageEditPartialImageEvent::Background]
      required :background, enum: -> { OpenAI::ImageEditPartialImageEvent::Background }

      # @!attribute created_at
      #   The Unix timestamp when the event was created.
      #
      #   @return [Integer]
      required :created_at, Integer

      # @!attribute output_format
      #   The output format for the requested edited image.
      #
      #   @return [Symbol, OpenAI::Models::ImageEditPartialImageEvent::OutputFormat]
      required :output_format, enum: -> { OpenAI::ImageEditPartialImageEvent::OutputFormat }

      # @!attribute partial_image_index
      #   0-based index for the partial image (streaming).
      #
      #   @return [Integer]
      required :partial_image_index, Integer

      # @!attribute quality
      #   The quality setting for the requested edited image.
      #
      #   @return [Symbol, OpenAI::Models::ImageEditPartialImageEvent::Quality]
      required :quality, enum: -> { OpenAI::ImageEditPartialImageEvent::Quality }

      # @!attribute size
      #   The size of the requested edited image.
      #
      #   @return [Symbol, OpenAI::Models::ImageEditPartialImageEvent::Size]
      required :size, enum: -> { OpenAI::ImageEditPartialImageEvent::Size }

      # @!attribute type
      #   The type of the event. Always `image_edit.partial_image`.
      #
      #   @return [Symbol, :"image_edit.partial_image"]
      required :type, const: :"image_edit.partial_image"

      # @!method initialize(b64_json:, background:, created_at:, output_format:, partial_image_index:, quality:, size:, type: :"image_edit.partial_image")
      #   Some parameter documentation has been truncated, see
      #   {OpenAI::Models::ImageEditPartialImageEvent} for more details.
      #
      #   Emitted when a partial image is available during image editing streaming.
      #
      #   @param b64_json [String] Base64-encoded partial image data, suitable for rendering as an image.
      #
      #   @param background [Symbol, OpenAI::Models::ImageEditPartialImageEvent::Background] The background setting for the requested edited image.
      #
      #   @param created_at [Integer] The Unix timestamp when the event was created.
      #
      #   @param output_format [Symbol, OpenAI::Models::ImageEditPartialImageEvent::OutputFormat] The output format for the requested edited image.
      #
      #   @param partial_image_index [Integer] 0-based index for the partial image (streaming).
      #
      #   @param quality [Symbol, OpenAI::Models::ImageEditPartialImageEvent::Quality] The quality setting for the requested edited image.
      #
      #   @param size [Symbol, OpenAI::Models::ImageEditPartialImageEvent::Size] The size of the requested edited image.
      #
      #   @param type [Symbol, :"image_edit.partial_image"] The type of the event. Always `image_edit.partial_image`.

      # The background setting for the requested edited image.
      #
      # @see OpenAI::Models::ImageEditPartialImageEvent#background
      module Background
        extend OpenAI::Internal::Type::Enum

        TRANSPARENT = :transparent
        OPAQUE = :opaque
        AUTO = :auto

        # @!method self.values
        #   @return [Array<Symbol>]
      end

      # The output format for the requested edited image.
      #
      # @see OpenAI::Models::ImageEditPartialImageEvent#output_format
      module OutputFormat
        extend OpenAI::Internal::Type::Enum

        PNG = :png
        WEBP = :webp
        JPEG = :jpeg

        # @!method self.values
        #   @return [Array<Symbol>]
      end

      # The quality setting for the requested edited image.
      #
      # @see OpenAI::Models::ImageEditPartialImageEvent#quality
      module Quality
        extend OpenAI::Internal::Type::Enum

        LOW = :low
        MEDIUM = :medium
        HIGH = :high
        AUTO = :auto

        # @!method self.values
        #   @return [Array<Symbol>]
      end

      # The size of the requested edited image.
      #
      # @see OpenAI::Models::ImageEditPartialImageEvent#size
      module Size
        extend OpenAI::Internal::Type::Enum

        SIZE_1024X1024 = :"1024x1024"
        SIZE_1024X1536 = :"1024x1536"
        SIZE_1536X1024 = :"1536x1024"
        AUTO = :auto

        # @!method self.values
        #   @return [Array<Symbol>]
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    # A streaming event emitted during an image edit request: either a partial
    # image or the final completed image, discriminated by the `type` field.
    module ImageEditStreamEvent
      extend OpenAI::Internal::Type::Union

      discriminator :type

      # Emitted when a partial image is available during image editing streaming.
      variant :"image_edit.partial_image", -> { OpenAI::ImageEditPartialImageEvent }

      # Emitted when image editing has completed and the final image is available.
      variant :"image_edit.completed", -> { OpenAI::ImageEditCompletedEvent }

      # @!method self.variants
      #   @return [Array(OpenAI::Models::ImageEditPartialImageEvent, OpenAI::Models::ImageEditCompletedEvent)]
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    # Emitted when image generation has completed and the final image is
    # available. Carries the finished image (base64) together with the
    # settings that produced it and, for `gpt-image-1`, token usage.
    class ImageGenCompletedEvent < OpenAI::Internal::Type::BaseModel
      # @!attribute b64_json
      # Base64-encoded image data, suitable for rendering as an image.
      #
      # @return [String]
      required :b64_json, String

      # @!attribute background
      # The background setting for the generated image.
      #
      # @return [Symbol, OpenAI::Models::ImageGenCompletedEvent::Background]
      required :background, enum: -> { OpenAI::ImageGenCompletedEvent::Background }

      # @!attribute created_at
      # The Unix timestamp when the event was created.
      #
      # @return [Integer]
      required :created_at, Integer

      # @!attribute output_format
      # The output format for the generated image.
      #
      # @return [Symbol, OpenAI::Models::ImageGenCompletedEvent::OutputFormat]
      required :output_format, enum: -> { OpenAI::ImageGenCompletedEvent::OutputFormat }

      # @!attribute quality
      # The quality setting for the generated image.
      #
      # @return [Symbol, OpenAI::Models::ImageGenCompletedEvent::Quality]
      required :quality, enum: -> { OpenAI::ImageGenCompletedEvent::Quality }

      # @!attribute size
      # The size of the generated image.
      #
      # @return [Symbol, OpenAI::Models::ImageGenCompletedEvent::Size]
      required :size, enum: -> { OpenAI::ImageGenCompletedEvent::Size }

      # @!attribute type
      # The type of the event. Always `image_generation.completed`.
      #
      # @return [Symbol, :"image_generation.completed"]
      required :type, const: :"image_generation.completed"

      # @!attribute usage
      # For `gpt-image-1` only, the token usage information for the image generation.
      #
      # @return [OpenAI::Models::ImageGenCompletedEvent::Usage]
      required :usage, -> { OpenAI::ImageGenCompletedEvent::Usage }

      # @!method initialize(b64_json:, background:, created_at:, output_format:, quality:, size:, usage:, type: :"image_generation.completed")
      # Some parameter documentation has been truncated; see
      # {OpenAI::Models::ImageGenCompletedEvent} for more details.
      #
      # Emitted when image generation has completed and the final image is available.
      #
      # @param b64_json [String] Base64-encoded image data, suitable for rendering as an image.
      #
      # @param background [Symbol, OpenAI::Models::ImageGenCompletedEvent::Background] The background setting for the generated image.
      #
      # @param created_at [Integer] The Unix timestamp when the event was created.
      #
      # @param output_format [Symbol, OpenAI::Models::ImageGenCompletedEvent::OutputFormat] The output format for the generated image.
      #
      # @param quality [Symbol, OpenAI::Models::ImageGenCompletedEvent::Quality] The quality setting for the generated image.
      #
      # @param size [Symbol, OpenAI::Models::ImageGenCompletedEvent::Size] The size of the generated image.
      #
      # @param usage [OpenAI::Models::ImageGenCompletedEvent::Usage] For `gpt-image-1` only, the token usage information for the image generation.
      #
      # @param type [Symbol, :"image_generation.completed"] The type of the event. Always `image_generation.completed`.

      # The background setting for the generated image.
      #
      # @see OpenAI::Models::ImageGenCompletedEvent#background
      module Background
        extend OpenAI::Internal::Type::Enum

        TRANSPARENT = :transparent
        OPAQUE = :opaque
        AUTO = :auto

        # @!method self.values
        # @return [Array<Symbol>]
      end

      # The output format for the generated image.
      #
      # @see OpenAI::Models::ImageGenCompletedEvent#output_format
      module OutputFormat
        extend OpenAI::Internal::Type::Enum

        PNG = :png
        WEBP = :webp
        JPEG = :jpeg

        # @!method self.values
        # @return [Array<Symbol>]
      end

      # The quality setting for the generated image.
      #
      # @see OpenAI::Models::ImageGenCompletedEvent#quality
      module Quality
        extend OpenAI::Internal::Type::Enum

        LOW = :low
        MEDIUM = :medium
        HIGH = :high
        AUTO = :auto

        # @!method self.values
        # @return [Array<Symbol>]
      end

      # The size of the generated image.
      #
      # @see OpenAI::Models::ImageGenCompletedEvent#size
      module Size
        extend OpenAI::Internal::Type::Enum

        SIZE_1024X1024 = :"1024x1024"
        SIZE_1024X1536 = :"1024x1536"
        SIZE_1536X1024 = :"1536x1024"
        AUTO = :auto

        # @!method self.values
        # @return [Array<Symbol>]
      end

      # Token usage breakdown for the generation (gpt-image-1 only).
      #
      # @see OpenAI::Models::ImageGenCompletedEvent#usage
      class Usage < OpenAI::Internal::Type::BaseModel
        # @!attribute input_tokens
        # The number of tokens (images and text) in the input prompt.
        #
        # @return [Integer]
        required :input_tokens, Integer

        # @!attribute input_tokens_details
        # The input tokens detailed information for the image generation.
        #
        # @return [OpenAI::Models::ImageGenCompletedEvent::Usage::InputTokensDetails]
        required :input_tokens_details, -> { OpenAI::ImageGenCompletedEvent::Usage::InputTokensDetails }

        # @!attribute output_tokens
        # The number of image tokens in the output image.
        #
        # @return [Integer]
        required :output_tokens, Integer

        # @!attribute total_tokens
        # The total number of tokens (images and text) used for the image generation.
        #
        # @return [Integer]
        required :total_tokens, Integer

        # @!method initialize(input_tokens:, input_tokens_details:, output_tokens:, total_tokens:)
        # Some parameter documentation has been truncated; see
        # {OpenAI::Models::ImageGenCompletedEvent::Usage} for more details.
        #
        # For `gpt-image-1` only, the token usage information for the image generation.
        #
        # @param input_tokens [Integer] The number of tokens (images and text) in the input prompt.
        #
        # @param input_tokens_details [OpenAI::Models::ImageGenCompletedEvent::Usage::InputTokensDetails] The input tokens detailed information for the image generation.
        #
        # @param output_tokens [Integer] The number of image tokens in the output image.
        #
        # @param total_tokens [Integer] The total number of tokens (images and text) used for the image generation.

        # Splits input token usage into image vs. text tokens.
        #
        # @see OpenAI::Models::ImageGenCompletedEvent::Usage#input_tokens_details
        class InputTokensDetails < OpenAI::Internal::Type::BaseModel
          # @!attribute image_tokens
          # The number of image tokens in the input prompt.
          #
          # @return [Integer]
          required :image_tokens, Integer

          # @!attribute text_tokens
          # The number of text tokens in the input prompt.
          #
          # @return [Integer]
          required :text_tokens, Integer

          # @!method initialize(image_tokens:, text_tokens:)
          # The input tokens detailed information for the image generation.
          #
          # @param image_tokens [Integer] The number of image tokens in the input prompt.
          #
          # @param text_tokens [Integer] The number of text tokens in the input prompt.
        end
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    # Emitted when a partial image is available during image generation
    # streaming. Carries an intermediate image (base64), its 0-based index in
    # the partial-image sequence, and the settings used for the request.
    class ImageGenPartialImageEvent < OpenAI::Internal::Type::BaseModel
      # @!attribute b64_json
      # Base64-encoded partial image data, suitable for rendering as an image.
      #
      # @return [String]
      required :b64_json, String

      # @!attribute background
      # The background setting for the requested image.
      #
      # @return [Symbol, OpenAI::Models::ImageGenPartialImageEvent::Background]
      required :background, enum: -> { OpenAI::ImageGenPartialImageEvent::Background }

      # @!attribute created_at
      # The Unix timestamp when the event was created.
      #
      # @return [Integer]
      required :created_at, Integer

      # @!attribute output_format
      # The output format for the requested image.
      #
      # @return [Symbol, OpenAI::Models::ImageGenPartialImageEvent::OutputFormat]
      required :output_format, enum: -> { OpenAI::ImageGenPartialImageEvent::OutputFormat }

      # @!attribute partial_image_index
      # 0-based index for the partial image (streaming).
      #
      # @return [Integer]
      required :partial_image_index, Integer

      # @!attribute quality
      # The quality setting for the requested image.
      #
      # @return [Symbol, OpenAI::Models::ImageGenPartialImageEvent::Quality]
      required :quality, enum: -> { OpenAI::ImageGenPartialImageEvent::Quality }

      # @!attribute size
      # The size of the requested image.
      #
      # @return [Symbol, OpenAI::Models::ImageGenPartialImageEvent::Size]
      required :size, enum: -> { OpenAI::ImageGenPartialImageEvent::Size }

      # @!attribute type
      # The type of the event. Always `image_generation.partial_image`.
      #
      # @return [Symbol, :"image_generation.partial_image"]
      required :type, const: :"image_generation.partial_image"

      # @!method initialize(b64_json:, background:, created_at:, output_format:, partial_image_index:, quality:, size:, type: :"image_generation.partial_image")
      # Some parameter documentation has been truncated; see
      # {OpenAI::Models::ImageGenPartialImageEvent} for more details.
      #
      # Emitted when a partial image is available during image generation streaming.
      #
      # @param b64_json [String] Base64-encoded partial image data, suitable for rendering as an image.
      #
      # @param background [Symbol, OpenAI::Models::ImageGenPartialImageEvent::Background] The background setting for the requested image.
      #
      # @param created_at [Integer] The Unix timestamp when the event was created.
      #
      # @param output_format [Symbol, OpenAI::Models::ImageGenPartialImageEvent::OutputFormat] The output format for the requested image.
      #
      # @param partial_image_index [Integer] 0-based index for the partial image (streaming).
      #
      # @param quality [Symbol, OpenAI::Models::ImageGenPartialImageEvent::Quality] The quality setting for the requested image.
      #
      # @param size [Symbol, OpenAI::Models::ImageGenPartialImageEvent::Size] The size of the requested image.
      #
      # @param type [Symbol, :"image_generation.partial_image"] The type of the event. Always `image_generation.partial_image`.

      # The background setting for the requested image.
      #
      # @see OpenAI::Models::ImageGenPartialImageEvent#background
      module Background
        extend OpenAI::Internal::Type::Enum

        TRANSPARENT = :transparent
        OPAQUE = :opaque
        AUTO = :auto

        # @!method self.values
        # @return [Array<Symbol>]
      end

      # The output format for the requested image.
      #
      # @see OpenAI::Models::ImageGenPartialImageEvent#output_format
      module OutputFormat
        extend OpenAI::Internal::Type::Enum

        PNG = :png
        WEBP = :webp
        JPEG = :jpeg

        # @!method self.values
        # @return [Array<Symbol>]
      end

      # The quality setting for the requested image.
      #
      # @see OpenAI::Models::ImageGenPartialImageEvent#quality
      module Quality
        extend OpenAI::Internal::Type::Enum

        LOW = :low
        MEDIUM = :medium
        HIGH = :high
        AUTO = :auto

        # @!method self.values
        # @return [Array<Symbol>]
      end

      # The size of the requested image.
      #
      # @see OpenAI::Models::ImageGenPartialImageEvent#size
      module Size
        extend OpenAI::Internal::Type::Enum

        SIZE_1024X1024 = :"1024x1024"
        SIZE_1024X1536 = :"1024x1536"
        SIZE_1536X1024 = :"1536x1024"
        AUTO = :auto

        # @!method self.values
        # @return [Array<Symbol>]
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    # A streaming event emitted during image generation. This is a
    # discriminated union: the payload's `type` field selects which concrete
    # event class is instantiated (partial image vs. completed).
    module ImageGenStreamEvent
      extend OpenAI::Internal::Type::Union

      # The `type` field of the incoming payload decides the variant.
      discriminator :type

      # Emitted when a partial image is available during image generation streaming.
      variant :"image_generation.partial_image", -> { OpenAI::ImageGenPartialImageEvent }

      # Emitted when image generation has completed and the final image is available.
      variant :"image_generation.completed", -> { OpenAI::ImageGenCompletedEvent }

      # @!method self.variants
      # @return [Array(OpenAI::Models::ImageGenPartialImageEvent, OpenAI::Models::ImageGenCompletedEvent)]
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    # Request parameters for the image-generation endpoints. Only `prompt` is
    # required; all other attributes are optional and most accept `nil`.
    #
    # @see OpenAI::Resources::Images#generate
    #
    # @see OpenAI::Resources::Images#generate_stream_raw
    class ImageGenerateParams < OpenAI::Internal::Type::BaseModel
      extend OpenAI::Internal::Type::RequestParameters::Converter
      include OpenAI::Internal::Type::RequestParameters

      # @!attribute prompt
      # A text description of the desired image(s). The maximum length is 32000
      # characters for `gpt-image-1`, 1000 characters for `dall-e-2` and 4000 characters
      # for `dall-e-3`.
      #
      # @return [String]
      required :prompt, String

      # @!attribute background
      # Allows to set transparency for the background of the generated image(s). This
      # parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
      # `opaque` or `auto` (default value). When `auto` is used, the model will
      # automatically determine the best background for the image.
      #
      # If `transparent`, the output format needs to support transparency, so it should
      # be set to either `png` (default value) or `webp`.
      #
      # @return [Symbol, OpenAI::Models::ImageGenerateParams::Background, nil]
      optional :background, enum: -> { OpenAI::ImageGenerateParams::Background }, nil?: true

      # @!attribute model
      # The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
      # `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
      # `gpt-image-1` is used.
      #
      # @return [String, Symbol, OpenAI::Models::ImageModel, nil]
      optional :model, union: -> { OpenAI::ImageGenerateParams::Model }, nil?: true

      # @!attribute moderation
      # Control the content-moderation level for images generated by `gpt-image-1`. Must
      # be either `low` for less restrictive filtering or `auto` (default value).
      #
      # @return [Symbol, OpenAI::Models::ImageGenerateParams::Moderation, nil]
      optional :moderation, enum: -> { OpenAI::ImageGenerateParams::Moderation }, nil?: true

      # @!attribute n
      # The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
      # `n=1` is supported.
      #
      # @return [Integer, nil]
      optional :n, Integer, nil?: true

      # @!attribute output_compression
      # The compression level (0-100%) for the generated images. This parameter is only
      # supported for `gpt-image-1` with the `webp` or `jpeg` output formats, and
      # defaults to 100.
      #
      # @return [Integer, nil]
      optional :output_compression, Integer, nil?: true

      # @!attribute output_format
      # The format in which the generated images are returned. This parameter is only
      # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
      #
      # @return [Symbol, OpenAI::Models::ImageGenerateParams::OutputFormat, nil]
      optional :output_format, enum: -> { OpenAI::ImageGenerateParams::OutputFormat }, nil?: true

      # @!attribute partial_images
      # The number of partial images to generate. This parameter is used for streaming
      # responses that return partial images. Value must be between 0 and 3. When set to
      # 0, the response will be a single image sent in one streaming event.
      #
      # Note that the final image may be sent before the full number of partial images
      # are generated if the full image is generated more quickly.
      #
      # @return [Integer, nil]
      optional :partial_images, Integer, nil?: true

      # @!attribute quality
      # The quality of the image that will be generated.
      #
      # - `auto` (default value) will automatically select the best quality for the
      #   given model.
      # - `high`, `medium` and `low` are supported for `gpt-image-1`.
      # - `hd` and `standard` are supported for `dall-e-3`.
      # - `standard` is the only option for `dall-e-2`.
      #
      # @return [Symbol, OpenAI::Models::ImageGenerateParams::Quality, nil]
      optional :quality, enum: -> { OpenAI::ImageGenerateParams::Quality }, nil?: true

      # @!attribute response_format
      # The format in which generated images with `dall-e-2` and `dall-e-3` are
      # returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
      # after the image has been generated. This parameter isn't supported for
      # `gpt-image-1` which will always return base64-encoded images.
      #
      # @return [Symbol, OpenAI::Models::ImageGenerateParams::ResponseFormat, nil]
      optional :response_format, enum: -> { OpenAI::ImageGenerateParams::ResponseFormat }, nil?: true

      # @!attribute size
      # The size of the generated images. Must be one of `1024x1024`, `1536x1024`
      # (landscape), `1024x1536` (portrait), or `auto` (default value) for
      # `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
      # one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
      #
      # @return [Symbol, OpenAI::Models::ImageGenerateParams::Size, nil]
      optional :size, enum: -> { OpenAI::ImageGenerateParams::Size }, nil?: true

      # @!attribute style
      # The style of the generated images. This parameter is only supported for
      # `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
      # towards generating hyper-real and dramatic images. Natural causes the model to
      # produce more natural, less hyper-real looking images.
      #
      # @return [Symbol, OpenAI::Models::ImageGenerateParams::Style, nil]
      optional :style, enum: -> { OpenAI::ImageGenerateParams::Style }, nil?: true

      # @!attribute user
      # A unique identifier representing your end-user, which can help OpenAI to monitor
      # and detect abuse.
      # [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
      #
      # @return [String, nil]
      optional :user, String

      # @!method initialize(prompt:, background: nil, model: nil, moderation: nil, n: nil, output_compression: nil, output_format: nil, partial_images: nil, quality: nil, response_format: nil, size: nil, style: nil, user: nil, request_options: {})
      # Some parameter documentation has been truncated; see
      # {OpenAI::Models::ImageGenerateParams} for more details.
      #
      # @param prompt [String] A text description of the desired image(s). The maximum length is 32000 characte
      #
      # @param background [Symbol, OpenAI::Models::ImageGenerateParams::Background, nil] Allows to set transparency for the background of the generated image(s).
      #
      # @param model [String, Symbol, OpenAI::Models::ImageModel, nil] The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or `gpt-im
      #
      # @param moderation [Symbol, OpenAI::Models::ImageGenerateParams::Moderation, nil] Control the content-moderation level for images generated by `gpt-image-1`. Must
      #
      # @param n [Integer, nil] The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only
      #
      # @param output_compression [Integer, nil] The compression level (0-100%) for the generated images. This parameter is only
      #
      # @param output_format [Symbol, OpenAI::Models::ImageGenerateParams::OutputFormat, nil] The format in which the generated images are returned. This parameter is only su
      #
      # @param partial_images [Integer, nil] The number of partial images to generate. This parameter is used for
      #
      # @param quality [Symbol, OpenAI::Models::ImageGenerateParams::Quality, nil] The quality of the image that will be generated.
      #
      # @param response_format [Symbol, OpenAI::Models::ImageGenerateParams::ResponseFormat, nil] The format in which generated images with `dall-e-2` and `dall-e-3` are returned
      #
      # @param size [Symbol, OpenAI::Models::ImageGenerateParams::Size, nil] The size of the generated images. Must be one of `1024x1024`, `1536x1024` (lands
      #
      # @param style [Symbol, OpenAI::Models::ImageGenerateParams::Style, nil] The style of the generated images. This parameter is only supported for `dall-e-
      #
      # @param user [String] A unique identifier representing your end-user, which can help OpenAI to monitor
      #
      # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]

      # Allows to set transparency for the background of the generated image(s). This
      # parameter is only supported for `gpt-image-1`. Must be one of `transparent`,
      # `opaque` or `auto` (default value). When `auto` is used, the model will
      # automatically determine the best background for the image.
      #
      # If `transparent`, the output format needs to support transparency, so it should
      # be set to either `png` (default value) or `webp`.
      module Background
        extend OpenAI::Internal::Type::Enum

        TRANSPARENT = :transparent
        OPAQUE = :opaque
        AUTO = :auto

        # @!method self.values
        # @return [Array<Symbol>]
      end

      # The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or
      # `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to
      # `gpt-image-1` is used. Accepts either a raw model-name string or one of
      # the {OpenAI::ImageModel} enum symbols.
      module Model
        extend OpenAI::Internal::Type::Union

        variant String

        # The model to use for image generation. One of `dall-e-2`, `dall-e-3`, or `gpt-image-1`. Defaults to `dall-e-2` unless a parameter specific to `gpt-image-1` is used.
        variant enum: -> { OpenAI::ImageModel }

        # @!method self.variants
        # @return [Array(String, Symbol, OpenAI::Models::ImageModel)]
      end

      # Control the content-moderation level for images generated by `gpt-image-1`. Must
      # be either `low` for less restrictive filtering or `auto` (default value).
      module Moderation
        extend OpenAI::Internal::Type::Enum

        LOW = :low
        AUTO = :auto

        # @!method self.values
        # @return [Array<Symbol>]
      end

      # The format in which the generated images are returned. This parameter is only
      # supported for `gpt-image-1`. Must be one of `png`, `jpeg`, or `webp`.
      module OutputFormat
        extend OpenAI::Internal::Type::Enum

        PNG = :png
        JPEG = :jpeg
        WEBP = :webp

        # @!method self.values
        # @return [Array<Symbol>]
      end

      # The quality of the image that will be generated.
      #
      # - `auto` (default value) will automatically select the best quality for the
      #   given model.
      # - `high`, `medium` and `low` are supported for `gpt-image-1`.
      # - `hd` and `standard` are supported for `dall-e-3`.
      # - `standard` is the only option for `dall-e-2`.
      module Quality
        extend OpenAI::Internal::Type::Enum

        STANDARD = :standard
        HD = :hd
        LOW = :low
        MEDIUM = :medium
        HIGH = :high
        AUTO = :auto

        # @!method self.values
        # @return [Array<Symbol>]
      end

      # The format in which generated images with `dall-e-2` and `dall-e-3` are
      # returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes
      # after the image has been generated. This parameter isn't supported for
      # `gpt-image-1` which will always return base64-encoded images.
      module ResponseFormat
        extend OpenAI::Internal::Type::Enum

        URL = :url
        B64_JSON = :b64_json

        # @!method self.values
        # @return [Array<Symbol>]
      end

      # The size of the generated images. Must be one of `1024x1024`, `1536x1024`
      # (landscape), `1024x1536` (portrait), or `auto` (default value) for
      # `gpt-image-1`, one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`, and
      # one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3`.
      module Size
        extend OpenAI::Internal::Type::Enum

        AUTO = :auto
        SIZE_1024X1024 = :"1024x1024"
        SIZE_1536X1024 = :"1536x1024"
        SIZE_1024X1536 = :"1024x1536"
        SIZE_256X256 = :"256x256"
        SIZE_512X512 = :"512x512"
        SIZE_1792X1024 = :"1792x1024"
        SIZE_1024X1792 = :"1024x1792"

        # @!method self.values
        # @return [Array<Symbol>]
      end

      # The style of the generated images. This parameter is only supported for
      # `dall-e-3`. Must be one of `vivid` or `natural`. Vivid causes the model to lean
      # towards generating hyper-real and dramatic images. Natural causes the model to
      # produce more natural, less hyper-real looking images.
      module Style
        extend OpenAI::Internal::Type::Enum

        VIVID = :vivid
        NATURAL = :natural

        # @!method self.values
        # @return [Array<Symbol>]
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    # Identifiers of the image-generation models accepted by the Images API.
    module ImageModel
      extend OpenAI::Internal::Type::Enum

      DALL_E_2 = :"dall-e-2"
      DALL_E_3 = :"dall-e-3"
      GPT_IMAGE_1 = :"gpt-image-1"

      # @!method self.values
      # @return [Array<Symbol>]
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    # The response from the image generation endpoint. Only `created` is
    # guaranteed; the remaining attributes are optional and depend on the
    # model and request parameters.
    #
    # @see OpenAI::Resources::Images#create_variation
    class ImagesResponse < OpenAI::Internal::Type::BaseModel
      # @!attribute created
      # The Unix timestamp (in seconds) of when the image was created.
      #
      # @return [Integer]
      required :created, Integer

      # @!attribute background
      # The background parameter used for the image generation. Either `transparent` or
      # `opaque`.
      #
      # @return [Symbol, OpenAI::Models::ImagesResponse::Background, nil]
      optional :background, enum: -> { OpenAI::ImagesResponse::Background }

      # @!attribute data
      # The list of generated images.
      #
      # @return [Array<OpenAI::Models::Image>, nil]
      optional :data, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Image] }

      # @!attribute output_format
      # The output format of the image generation. Either `png`, `webp`, or `jpeg`.
      #
      # @return [Symbol, OpenAI::Models::ImagesResponse::OutputFormat, nil]
      optional :output_format, enum: -> { OpenAI::ImagesResponse::OutputFormat }

      # @!attribute quality
      # The quality of the image generated. Either `low`, `medium`, or `high`.
      #
      # @return [Symbol, OpenAI::Models::ImagesResponse::Quality, nil]
      optional :quality, enum: -> { OpenAI::ImagesResponse::Quality }

      # @!attribute size
      # The size of the image generated. Either `1024x1024`, `1024x1536`, or
      # `1536x1024`.
      #
      # @return [Symbol, OpenAI::Models::ImagesResponse::Size, nil]
      optional :size, enum: -> { OpenAI::ImagesResponse::Size }

      # @!attribute usage
      # For `gpt-image-1` only, the token usage information for the image generation.
      #
      # @return [OpenAI::Models::ImagesResponse::Usage, nil]
      optional :usage, -> { OpenAI::ImagesResponse::Usage }

      # @!method initialize(created:, background: nil, data: nil, output_format: nil, quality: nil, size: nil, usage: nil)
      # Some parameter documentation has been truncated; see
      # {OpenAI::Models::ImagesResponse} for more details.
      #
      # The response from the image generation endpoint.
      #
      # @param created [Integer] The Unix timestamp (in seconds) of when the image was created.
      #
      # @param background [Symbol, OpenAI::Models::ImagesResponse::Background] The background parameter used for the image generation. Either `transparent` or
      #
      # @param data [Array<OpenAI::Models::Image>] The list of generated images.
      #
      # @param output_format [Symbol, OpenAI::Models::ImagesResponse::OutputFormat] The output format of the image generation. Either `png`, `webp`, or `jpeg`.
      #
      # @param quality [Symbol, OpenAI::Models::ImagesResponse::Quality] The quality of the image generated. Either `low`, `medium`, or `high`.
      #
      # @param size [Symbol, OpenAI::Models::ImagesResponse::Size] The size of the image generated. Either `1024x1024`, `1024x1536`, or `1536x1024`
      #
      # @param usage [OpenAI::Models::ImagesResponse::Usage] For `gpt-image-1` only, the token usage information for the image generation.

      # The background parameter used for the image generation. Either `transparent` or
      # `opaque`.
      #
      # @see OpenAI::Models::ImagesResponse#background
      module Background
        extend OpenAI::Internal::Type::Enum

        TRANSPARENT = :transparent
        OPAQUE = :opaque

        # @!method self.values
        # @return [Array<Symbol>]
      end

      # The output format of the image generation. Either `png`, `webp`, or `jpeg`.
      #
      # @see OpenAI::Models::ImagesResponse#output_format
      module OutputFormat
        extend OpenAI::Internal::Type::Enum

        PNG = :png
        WEBP = :webp
        JPEG = :jpeg

        # @!method self.values
        # @return [Array<Symbol>]
      end

      # The quality of the image generated. Either `low`, `medium`, or `high`.
      #
      # @see OpenAI::Models::ImagesResponse#quality
      module Quality
        extend OpenAI::Internal::Type::Enum

        LOW = :low
        MEDIUM = :medium
        HIGH = :high

        # @!method self.values
        # @return [Array<Symbol>]
      end

      # The size of the image generated. Either `1024x1024`, `1024x1536`, or
      # `1536x1024`.
      #
      # @see OpenAI::Models::ImagesResponse#size
      module Size
        extend OpenAI::Internal::Type::Enum

        SIZE_1024X1024 = :"1024x1024"
        SIZE_1024X1536 = :"1024x1536"
        SIZE_1536X1024 = :"1536x1024"

        # @!method self.values
        # @return [Array<Symbol>]
      end

      # Token usage breakdown for the generation (gpt-image-1 only).
      #
      # @see OpenAI::Models::ImagesResponse#usage
      class Usage < OpenAI::Internal::Type::BaseModel
        # @!attribute input_tokens
        # The number of tokens (images and text) in the input prompt.
        #
        # @return [Integer]
        required :input_tokens, Integer

        # @!attribute input_tokens_details
        # The input tokens detailed information for the image generation.
        #
        # @return [OpenAI::Models::ImagesResponse::Usage::InputTokensDetails]
        required :input_tokens_details, -> { OpenAI::ImagesResponse::Usage::InputTokensDetails }

        # @!attribute output_tokens
        # The number of output tokens generated by the model.
        #
        # @return [Integer]
        required :output_tokens, Integer

        # @!attribute total_tokens
        # The total number of tokens (images and text) used for the image generation.
        #
        # @return [Integer]
        required :total_tokens, Integer

        # @!method initialize(input_tokens:, input_tokens_details:, output_tokens:, total_tokens:)
        # For `gpt-image-1` only, the token usage information for the image generation.
        #
        # @param input_tokens [Integer] The number of tokens (images and text) in the input prompt.
        #
        # @param input_tokens_details [OpenAI::Models::ImagesResponse::Usage::InputTokensDetails] The input tokens detailed information for the image generation.
        #
        # @param output_tokens [Integer] The number of output tokens generated by the model.
        #
        # @param total_tokens [Integer] The total number of tokens (images and text) used for the image generation.

        # Splits input token usage into image vs. text tokens.
        #
        # @see OpenAI::Models::ImagesResponse::Usage#input_tokens_details
        class InputTokensDetails < OpenAI::Internal::Type::BaseModel
          # @!attribute image_tokens
          # The number of image tokens in the input prompt.
          #
          # @return [Integer]
          required :image_tokens, Integer

          # @!attribute text_tokens
          # The number of text tokens in the input prompt.
          #
          # @return [Integer]
          required :text_tokens, Integer

          # @!method initialize(image_tokens:, text_tokens:)
          # The input tokens detailed information for the image generation.
          #
          # @param image_tokens [Integer] The number of image tokens in the input prompt.
          #
          # @param text_tokens [Integer] The number of text tokens in the input prompt.
        end
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    # Converter for metadata payloads: a hash whose values are coerced to
    # String (keys are left as received — presumably string/symbol keys;
    # confirm against `HashOf`).
    #
    # @type [OpenAI::Internal::Type::Converter]
    Metadata = OpenAI::Internal::Type::HashOf[String]
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    # Describes an OpenAI model offering that can be used with the API.
    #
    # @see OpenAI::Resources::Models#retrieve
    class Model < OpenAI::Internal::Type::BaseModel
      # @!attribute id
      # The model identifier, which can be referenced in the API endpoints.
      #
      # @return [String]
      required :id, String

      # @!attribute created
      # The Unix timestamp (in seconds) when the model was created.
      #
      # @return [Integer]
      required :created, Integer

      # @!attribute object
      # The object type, which is always "model".
      #
      # @return [Symbol, :model]
      required :object, const: :model

      # @!attribute owned_by
      # The organization that owns the model.
      #
      # @return [String]
      required :owned_by, String

      # @!method initialize(id:, created:, owned_by:, object: :model)
      # Describes an OpenAI model offering that can be used with the API.
      #
      # @param id [String] The model identifier, which can be referenced in the API endpoints.
      #
      # @param created [Integer] The Unix timestamp (in seconds) when the model was created.
      #
      # @param owned_by [String] The organization that owns the model.
      #
      # @param object [Symbol, :model] The object type, which is always "model".
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    # Request parameters for deleting a model. Declares no endpoint-specific
    # fields; only the shared `request_options` are accepted.
    #
    # @see OpenAI::Resources::Models#delete
    class ModelDeleteParams < OpenAI::Internal::Type::BaseModel
      extend OpenAI::Internal::Type::RequestParameters::Converter
      include OpenAI::Internal::Type::RequestParameters

      # @!method initialize(request_options: {})
      # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# @see OpenAI::Resources::Models#delete
-
1
class ModelDeleted < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute deleted
-
#
-
# @return [Boolean]
-
1
required :deleted, OpenAI::Internal::Type::Boolean
-
-
# @!attribute object
-
#
-
# @return [String]
-
1
required :object, String
-
-
# @!method initialize(id:, deleted:, object:)
-
# @param id [String]
-
# @param deleted [Boolean]
-
# @param object [String]
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# @see OpenAI::Resources::Models#list
-
1
class ModelListParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!method initialize(request_options: {})
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# @see OpenAI::Resources::Models#retrieve
-
1
class ModelRetrieveParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!method initialize(request_options: {})
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
class Moderation < OpenAI::Internal::Type::BaseModel
-
# @!attribute categories
-
# A list of the categories, and whether they are flagged or not.
-
#
-
# @return [OpenAI::Models::Moderation::Categories]
-
1
required :categories, -> { OpenAI::Moderation::Categories }
-
-
# @!attribute category_applied_input_types
-
# A list of the categories along with the input type(s) that the score applies to.
-
#
-
# @return [OpenAI::Models::Moderation::CategoryAppliedInputTypes]
-
1
required :category_applied_input_types, -> { OpenAI::Moderation::CategoryAppliedInputTypes }
-
-
# @!attribute category_scores
-
# A list of the categories along with their scores as predicted by model.
-
#
-
# @return [OpenAI::Models::Moderation::CategoryScores]
-
1
required :category_scores, -> { OpenAI::Moderation::CategoryScores }
-
-
# @!attribute flagged
-
# Whether any of the below categories are flagged.
-
#
-
# @return [Boolean]
-
1
required :flagged, OpenAI::Internal::Type::Boolean
-
-
# @!method initialize(categories:, category_applied_input_types:, category_scores:, flagged:)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Moderation} for more details.
-
#
-
# @param categories [OpenAI::Models::Moderation::Categories] A list of the categories, and whether they are flagged or not.
-
#
-
# @param category_applied_input_types [OpenAI::Models::Moderation::CategoryAppliedInputTypes] A list of the categories along with the input type(s) that the score applies to.
-
#
-
# @param category_scores [OpenAI::Models::Moderation::CategoryScores] A list of the categories along with their scores as predicted by model.
-
#
-
# @param flagged [Boolean] Whether any of the below categories are flagged.
-
-
# @see OpenAI::Models::Moderation#categories
-
1
class Categories < OpenAI::Internal::Type::BaseModel
-
# @!attribute harassment
-
# Content that expresses, incites, or promotes harassing language towards any
-
# target.
-
#
-
# @return [Boolean]
-
1
required :harassment, OpenAI::Internal::Type::Boolean
-
-
# @!attribute harassment_threatening
-
# Harassment content that also includes violence or serious harm towards any
-
# target.
-
#
-
# @return [Boolean]
-
1
required :harassment_threatening, OpenAI::Internal::Type::Boolean, api_name: :"harassment/threatening"
-
-
# @!attribute hate
-
# Content that expresses, incites, or promotes hate based on race, gender,
-
# ethnicity, religion, nationality, sexual orientation, disability status, or
-
# caste. Hateful content aimed at non-protected groups (e.g., chess players) is
-
# harassment.
-
#
-
# @return [Boolean]
-
1
required :hate, OpenAI::Internal::Type::Boolean
-
-
# @!attribute hate_threatening
-
# Hateful content that also includes violence or serious harm towards the targeted
-
# group based on race, gender, ethnicity, religion, nationality, sexual
-
# orientation, disability status, or caste.
-
#
-
# @return [Boolean]
-
1
required :hate_threatening, OpenAI::Internal::Type::Boolean, api_name: :"hate/threatening"
-
-
# @!attribute illicit
-
# Content that includes instructions or advice that facilitate the planning or
-
# execution of wrongdoing, or that gives advice or instruction on how to commit
-
# illicit acts. For example, "how to shoplift" would fit this category.
-
#
-
# @return [Boolean, nil]
-
1
required :illicit, OpenAI::Internal::Type::Boolean, nil?: true
-
-
# @!attribute illicit_violent
-
# Content that includes instructions or advice that facilitate the planning or
-
# execution of wrongdoing that also includes violence, or that gives advice or
-
# instruction on the procurement of any weapon.
-
#
-
# @return [Boolean, nil]
-
1
required :illicit_violent, OpenAI::Internal::Type::Boolean, api_name: :"illicit/violent", nil?: true
-
-
# @!attribute self_harm
-
# Content that promotes, encourages, or depicts acts of self-harm, such as
-
# suicide, cutting, and eating disorders.
-
#
-
# @return [Boolean]
-
1
required :self_harm, OpenAI::Internal::Type::Boolean, api_name: :"self-harm"
-
-
# @!attribute self_harm_instructions
-
# Content that encourages performing acts of self-harm, such as suicide, cutting,
-
# and eating disorders, or that gives instructions or advice on how to commit such
-
# acts.
-
#
-
# @return [Boolean]
-
1
required :self_harm_instructions, OpenAI::Internal::Type::Boolean, api_name: :"self-harm/instructions"
-
-
# @!attribute self_harm_intent
-
# Content where the speaker expresses that they are engaging or intend to engage
-
# in acts of self-harm, such as suicide, cutting, and eating disorders.
-
#
-
# @return [Boolean]
-
1
required :self_harm_intent, OpenAI::Internal::Type::Boolean, api_name: :"self-harm/intent"
-
-
# @!attribute sexual
-
# Content meant to arouse sexual excitement, such as the description of sexual
-
# activity, or that promotes sexual services (excluding sex education and
-
# wellness).
-
#
-
# @return [Boolean]
-
1
required :sexual, OpenAI::Internal::Type::Boolean
-
-
# @!attribute sexual_minors
-
# Sexual content that includes an individual who is under 18 years old.
-
#
-
# @return [Boolean]
-
1
required :sexual_minors, OpenAI::Internal::Type::Boolean, api_name: :"sexual/minors"
-
-
# @!attribute violence
-
# Content that depicts death, violence, or physical injury.
-
#
-
# @return [Boolean]
-
1
required :violence, OpenAI::Internal::Type::Boolean
-
-
# @!attribute violence_graphic
-
# Content that depicts death, violence, or physical injury in graphic detail.
-
#
-
# @return [Boolean]
-
1
required :violence_graphic, OpenAI::Internal::Type::Boolean, api_name: :"violence/graphic"
-
-
# @!method initialize(harassment:, harassment_threatening:, hate:, hate_threatening:, illicit:, illicit_violent:, self_harm:, self_harm_instructions:, self_harm_intent:, sexual:, sexual_minors:, violence:, violence_graphic:)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Moderation::Categories} for more details.
-
#
-
# A list of the categories, and whether they are flagged or not.
-
#
-
# @param harassment [Boolean] Content that expresses, incites, or promotes harassing language towards any targ
-
#
-
# @param harassment_threatening [Boolean] Harassment content that also includes violence or serious harm towards any targe
-
#
-
# @param hate [Boolean] Content that expresses, incites, or promotes hate based on race, gender, ethnici
-
#
-
# @param hate_threatening [Boolean] Hateful content that also includes violence or serious harm towards the targeted
-
#
-
# @param illicit [Boolean, nil] Content that includes instructions or advice that facilitate the planning or exe
-
#
-
# @param illicit_violent [Boolean, nil] Content that includes instructions or advice that facilitate the planning or exe
-
#
-
# @param self_harm [Boolean] Content that promotes, encourages, or depicts acts of self-harm, such as suicide
-
#
-
# @param self_harm_instructions [Boolean] Content that encourages performing acts of self-harm, such as suicide, cutting,
-
#
-
# @param self_harm_intent [Boolean] Content where the speaker expresses that they are engaging or intend to engage i
-
#
-
# @param sexual [Boolean] Content meant to arouse sexual excitement, such as the description of sexual act
-
#
-
# @param sexual_minors [Boolean] Sexual content that includes an individual who is under 18 years old.
-
#
-
# @param violence [Boolean] Content that depicts death, violence, or physical injury.
-
#
-
# @param violence_graphic [Boolean] Content that depicts death, violence, or physical injury in graphic detail.
-
end
-
-
# @see OpenAI::Models::Moderation#category_applied_input_types
-
1
class CategoryAppliedInputTypes < OpenAI::Internal::Type::BaseModel
-
# @!attribute harassment
-
# The applied input type(s) for the category 'harassment'.
-
#
-
# @return [Array<Symbol, OpenAI::Models::Moderation::CategoryAppliedInputTypes::Harassment>]
-
1
required :harassment,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::Harassment]
-
}
-
-
# @!attribute harassment_threatening
-
# The applied input type(s) for the category 'harassment/threatening'.
-
#
-
# @return [Array<Symbol, OpenAI::Models::Moderation::CategoryAppliedInputTypes::HarassmentThreatening>]
-
1
required :harassment_threatening,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::HarassmentThreatening]
-
},
-
api_name: :"harassment/threatening"
-
-
# @!attribute hate
-
# The applied input type(s) for the category 'hate'.
-
#
-
# @return [Array<Symbol, OpenAI::Models::Moderation::CategoryAppliedInputTypes::Hate>]
-
1
required :hate,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::Hate]
-
}
-
-
# @!attribute hate_threatening
-
# The applied input type(s) for the category 'hate/threatening'.
-
#
-
# @return [Array<Symbol, OpenAI::Models::Moderation::CategoryAppliedInputTypes::HateThreatening>]
-
1
required :hate_threatening,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::HateThreatening]
-
},
-
api_name: :"hate/threatening"
-
-
# @!attribute illicit
-
# The applied input type(s) for the category 'illicit'.
-
#
-
# @return [Array<Symbol, OpenAI::Models::Moderation::CategoryAppliedInputTypes::Illicit>]
-
1
required :illicit,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::Illicit]
-
}
-
-
# @!attribute illicit_violent
-
# The applied input type(s) for the category 'illicit/violent'.
-
#
-
# @return [Array<Symbol, OpenAI::Models::Moderation::CategoryAppliedInputTypes::IllicitViolent>]
-
1
required :illicit_violent,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::IllicitViolent]
-
},
-
api_name: :"illicit/violent"
-
-
# @!attribute self_harm
-
# The applied input type(s) for the category 'self-harm'.
-
#
-
# @return [Array<Symbol, OpenAI::Models::Moderation::CategoryAppliedInputTypes::SelfHarm>]
-
1
required :self_harm,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::SelfHarm]
-
},
-
api_name: :"self-harm"
-
-
# @!attribute self_harm_instructions
-
# The applied input type(s) for the category 'self-harm/instructions'.
-
#
-
# @return [Array<Symbol, OpenAI::Models::Moderation::CategoryAppliedInputTypes::SelfHarmInstruction>]
-
1
required :self_harm_instructions,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::SelfHarmInstruction]
-
},
-
api_name: :"self-harm/instructions"
-
-
# @!attribute self_harm_intent
-
# The applied input type(s) for the category 'self-harm/intent'.
-
#
-
# @return [Array<Symbol, OpenAI::Models::Moderation::CategoryAppliedInputTypes::SelfHarmIntent>]
-
1
required :self_harm_intent,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::SelfHarmIntent]
-
},
-
api_name: :"self-harm/intent"
-
-
# @!attribute sexual
-
# The applied input type(s) for the category 'sexual'.
-
#
-
# @return [Array<Symbol, OpenAI::Models::Moderation::CategoryAppliedInputTypes::Sexual>]
-
1
required :sexual,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::Sexual]
-
}
-
-
# @!attribute sexual_minors
-
# The applied input type(s) for the category 'sexual/minors'.
-
#
-
# @return [Array<Symbol, OpenAI::Models::Moderation::CategoryAppliedInputTypes::SexualMinor>]
-
1
required :sexual_minors,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::SexualMinor]
-
},
-
api_name: :"sexual/minors"
-
-
# @!attribute violence
-
# The applied input type(s) for the category 'violence'.
-
#
-
# @return [Array<Symbol, OpenAI::Models::Moderation::CategoryAppliedInputTypes::Violence>]
-
1
required :violence,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::Violence]
-
}
-
-
# @!attribute violence_graphic
-
# The applied input type(s) for the category 'violence/graphic'.
-
#
-
# @return [Array<Symbol, OpenAI::Models::Moderation::CategoryAppliedInputTypes::ViolenceGraphic>]
-
1
required :violence_graphic,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Moderation::CategoryAppliedInputTypes::ViolenceGraphic]
-
},
-
api_name: :"violence/graphic"
-
-
# @!method initialize(harassment:, harassment_threatening:, hate:, hate_threatening:, illicit:, illicit_violent:, self_harm:, self_harm_instructions:, self_harm_intent:, sexual:, sexual_minors:, violence:, violence_graphic:)
-
# A list of the categories along with the input type(s) that the score applies to.
-
#
-
# @param harassment [Array<Symbol, OpenAI::Models::Moderation::CategoryAppliedInputTypes::Harassment>] The applied input type(s) for the category 'harassment'.
-
#
-
# @param harassment_threatening [Array<Symbol, OpenAI::Models::Moderation::CategoryAppliedInputTypes::HarassmentThreatening>] The applied input type(s) for the category 'harassment/threatening'.
-
#
-
# @param hate [Array<Symbol, OpenAI::Models::Moderation::CategoryAppliedInputTypes::Hate>] The applied input type(s) for the category 'hate'.
-
#
-
# @param hate_threatening [Array<Symbol, OpenAI::Models::Moderation::CategoryAppliedInputTypes::HateThreatening>] The applied input type(s) for the category 'hate/threatening'.
-
#
-
# @param illicit [Array<Symbol, OpenAI::Models::Moderation::CategoryAppliedInputTypes::Illicit>] The applied input type(s) for the category 'illicit'.
-
#
-
# @param illicit_violent [Array<Symbol, OpenAI::Models::Moderation::CategoryAppliedInputTypes::IllicitViolent>] The applied input type(s) for the category 'illicit/violent'.
-
#
-
# @param self_harm [Array<Symbol, OpenAI::Models::Moderation::CategoryAppliedInputTypes::SelfHarm>] The applied input type(s) for the category 'self-harm'.
-
#
-
# @param self_harm_instructions [Array<Symbol, OpenAI::Models::Moderation::CategoryAppliedInputTypes::SelfHarmInstruction>] The applied input type(s) for the category 'self-harm/instructions'.
-
#
-
# @param self_harm_intent [Array<Symbol, OpenAI::Models::Moderation::CategoryAppliedInputTypes::SelfHarmIntent>] The applied input type(s) for the category 'self-harm/intent'.
-
#
-
# @param sexual [Array<Symbol, OpenAI::Models::Moderation::CategoryAppliedInputTypes::Sexual>] The applied input type(s) for the category 'sexual'.
-
#
-
# @param sexual_minors [Array<Symbol, OpenAI::Models::Moderation::CategoryAppliedInputTypes::SexualMinor>] The applied input type(s) for the category 'sexual/minors'.
-
#
-
# @param violence [Array<Symbol, OpenAI::Models::Moderation::CategoryAppliedInputTypes::Violence>] The applied input type(s) for the category 'violence'.
-
#
-
# @param violence_graphic [Array<Symbol, OpenAI::Models::Moderation::CategoryAppliedInputTypes::ViolenceGraphic>] The applied input type(s) for the category 'violence/graphic'.
-
-
1
module Harassment
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
TEXT = :text
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
1
module HarassmentThreatening
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
TEXT = :text
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
1
module Hate
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
TEXT = :text
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
1
module HateThreatening
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
TEXT = :text
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
1
module Illicit
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
TEXT = :text
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
1
module IllicitViolent
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
TEXT = :text
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
1
module SelfHarm
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
TEXT = :text
-
1
IMAGE = :image
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
1
module SelfHarmInstruction
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
TEXT = :text
-
1
IMAGE = :image
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
1
module SelfHarmIntent
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
TEXT = :text
-
1
IMAGE = :image
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
1
module Sexual
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
TEXT = :text
-
1
IMAGE = :image
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
1
module SexualMinor
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
TEXT = :text
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
1
module Violence
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
TEXT = :text
-
1
IMAGE = :image
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
1
module ViolenceGraphic
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
TEXT = :text
-
1
IMAGE = :image
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
-
# @see OpenAI::Models::Moderation#category_scores
-
1
class CategoryScores < OpenAI::Internal::Type::BaseModel
-
# @!attribute harassment
-
# The score for the category 'harassment'.
-
#
-
# @return [Float]
-
1
required :harassment, Float
-
-
# @!attribute harassment_threatening
-
# The score for the category 'harassment/threatening'.
-
#
-
# @return [Float]
-
1
required :harassment_threatening, Float, api_name: :"harassment/threatening"
-
-
# @!attribute hate
-
# The score for the category 'hate'.
-
#
-
# @return [Float]
-
1
required :hate, Float
-
-
# @!attribute hate_threatening
-
# The score for the category 'hate/threatening'.
-
#
-
# @return [Float]
-
1
required :hate_threatening, Float, api_name: :"hate/threatening"
-
-
# @!attribute illicit
-
# The score for the category 'illicit'.
-
#
-
# @return [Float]
-
1
required :illicit, Float
-
-
# @!attribute illicit_violent
-
# The score for the category 'illicit/violent'.
-
#
-
# @return [Float]
-
1
required :illicit_violent, Float, api_name: :"illicit/violent"
-
-
# @!attribute self_harm
-
# The score for the category 'self-harm'.
-
#
-
# @return [Float]
-
1
required :self_harm, Float, api_name: :"self-harm"
-
-
# @!attribute self_harm_instructions
-
# The score for the category 'self-harm/instructions'.
-
#
-
# @return [Float]
-
1
required :self_harm_instructions, Float, api_name: :"self-harm/instructions"
-
-
# @!attribute self_harm_intent
-
# The score for the category 'self-harm/intent'.
-
#
-
# @return [Float]
-
1
required :self_harm_intent, Float, api_name: :"self-harm/intent"
-
-
# @!attribute sexual
-
# The score for the category 'sexual'.
-
#
-
# @return [Float]
-
1
required :sexual, Float
-
-
# @!attribute sexual_minors
-
# The score for the category 'sexual/minors'.
-
#
-
# @return [Float]
-
1
required :sexual_minors, Float, api_name: :"sexual/minors"
-
-
# @!attribute violence
-
# The score for the category 'violence'.
-
#
-
# @return [Float]
-
1
required :violence, Float
-
-
# @!attribute violence_graphic
-
# The score for the category 'violence/graphic'.
-
#
-
# @return [Float]
-
1
required :violence_graphic, Float, api_name: :"violence/graphic"
-
-
# @!method initialize(harassment:, harassment_threatening:, hate:, hate_threatening:, illicit:, illicit_violent:, self_harm:, self_harm_instructions:, self_harm_intent:, sexual:, sexual_minors:, violence:, violence_graphic:)
-
# A list of the categories along with their scores as predicted by model.
-
#
-
# @param harassment [Float] The score for the category 'harassment'.
-
#
-
# @param harassment_threatening [Float] The score for the category 'harassment/threatening'.
-
#
-
# @param hate [Float] The score for the category 'hate'.
-
#
-
# @param hate_threatening [Float] The score for the category 'hate/threatening'.
-
#
-
# @param illicit [Float] The score for the category 'illicit'.
-
#
-
# @param illicit_violent [Float] The score for the category 'illicit/violent'.
-
#
-
# @param self_harm [Float] The score for the category 'self-harm'.
-
#
-
# @param self_harm_instructions [Float] The score for the category 'self-harm/instructions'.
-
#
-
# @param self_harm_intent [Float] The score for the category 'self-harm/intent'.
-
#
-
# @param sexual [Float] The score for the category 'sexual'.
-
#
-
# @param sexual_minors [Float] The score for the category 'sexual/minors'.
-
#
-
# @param violence [Float] The score for the category 'violence'.
-
#
-
# @param violence_graphic [Float] The score for the category 'violence/graphic'.
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# @see OpenAI::Resources::Moderations#create
-
1
class ModerationCreateParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!attribute input
-
# Input (or inputs) to classify. Can be a single string, an array of strings, or
-
# an array of multi-modal input objects similar to other models.
-
#
-
# @return [String, Array<String>, Array<OpenAI::Models::ModerationImageURLInput, OpenAI::Models::ModerationTextInput>]
-
1
required :input, union: -> { OpenAI::ModerationCreateParams::Input }
-
-
# @!attribute model
-
# The content moderation model you would like to use. Learn more in
-
# [the moderation guide](https://platform.openai.com/docs/guides/moderation), and
-
# learn about available models
-
# [here](https://platform.openai.com/docs/models#moderation).
-
#
-
# @return [String, Symbol, OpenAI::Models::ModerationModel, nil]
-
1
optional :model, union: -> { OpenAI::ModerationCreateParams::Model }
-
-
# @!method initialize(input:, model: nil, request_options: {})
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::ModerationCreateParams} for more details.
-
#
-
# @param input [String, Array<String>, Array<OpenAI::Models::ModerationImageURLInput, OpenAI::Models::ModerationTextInput>] Input (or inputs) to classify. Can be a single string, an array of strings, or
-
#
-
# @param model [String, Symbol, OpenAI::Models::ModerationModel] The content moderation model you would like to use. Learn more in
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
-
# Input (or inputs) to classify. Can be a single string, an array of strings, or
-
# an array of multi-modal input objects similar to other models.
-
1
module Input
-
1
extend OpenAI::Internal::Type::Union
-
-
# A string of text to classify for moderation.
-
1
variant String
-
-
# An array of strings to classify for moderation.
-
1
variant -> { OpenAI::Models::ModerationCreateParams::Input::StringArray }
-
-
# An array of multi-modal inputs to the moderation model.
-
1
variant -> { OpenAI::Models::ModerationCreateParams::Input::ModerationMultiModalInputArray }
-
-
# @!method self.variants
-
# @return [Array(String, Array<String>, Array<OpenAI::Models::ModerationImageURLInput, OpenAI::Models::ModerationTextInput>)]
-
-
# @type [OpenAI::Internal::Type::Converter]
-
1
StringArray = OpenAI::Internal::Type::ArrayOf[String]
-
-
# @type [OpenAI::Internal::Type::Converter]
-
ModerationMultiModalInputArray =
-
1
OpenAI::Internal::Type::ArrayOf[union: -> { OpenAI::ModerationMultiModalInput }]
-
end
-
-
# The content moderation model you would like to use. Learn more in
-
# [the moderation guide](https://platform.openai.com/docs/guides/moderation), and
-
# learn about available models
-
# [here](https://platform.openai.com/docs/models#moderation).
-
1
module Model
-
1
extend OpenAI::Internal::Type::Union
-
-
1
variant String
-
-
# The content moderation model you would like to use. Learn more in
-
# [the moderation guide](https://platform.openai.com/docs/guides/moderation), and learn about
-
# available models [here](https://platform.openai.com/docs/models#moderation).
-
1
variant enum: -> { OpenAI::ModerationModel }
-
-
# @!method self.variants
-
# @return [Array(String, Symbol, OpenAI::Models::ModerationModel)]
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# @see OpenAI::Resources::Moderations#create
-
1
class ModerationCreateResponse < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique identifier for the moderation request.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute model
-
# The model used to generate the moderation results.
-
#
-
# @return [String]
-
1
required :model, String
-
-
# @!attribute results
-
# A list of moderation objects.
-
#
-
# @return [Array<OpenAI::Models::Moderation>]
-
1
required :results, -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Moderation] }
-
-
# @!method initialize(id:, model:, results:)
-
# Represents if a given text input is potentially harmful.
-
#
-
# @param id [String] The unique identifier for the moderation request.
-
#
-
# @param model [String] The model used to generate the moderation results.
-
#
-
# @param results [Array<OpenAI::Models::Moderation>] A list of moderation objects.
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
class ModerationImageURLInput < OpenAI::Internal::Type::BaseModel
-
# @!attribute image_url
-
# Contains either an image URL or a data URL for a base64 encoded image.
-
#
-
# @return [OpenAI::Models::ModerationImageURLInput::ImageURL]
-
1
required :image_url, -> { OpenAI::ModerationImageURLInput::ImageURL }
-
-
# @!attribute type
-
# Always `image_url`.
-
#
-
# @return [Symbol, :image_url]
-
1
required :type, const: :image_url
-
-
# @!method initialize(image_url:, type: :image_url)
-
# An object describing an image to classify.
-
#
-
# @param image_url [OpenAI::Models::ModerationImageURLInput::ImageURL] Contains either an image URL or a data URL for a base64 encoded image.
-
#
-
# @param type [Symbol, :image_url] Always `image_url`.
-
-
# @see OpenAI::Models::ModerationImageURLInput#image_url
-
1
class ImageURL < OpenAI::Internal::Type::BaseModel
-
# @!attribute url
-
# Either a URL of the image or the base64 encoded image data.
-
#
-
# @return [String]
-
1
required :url, String
-
-
# @!method initialize(url:)
-
# Contains either an image URL or a data URL for a base64 encoded image.
-
#
-
# @param url [String] Either a URL of the image or the base64 encoded image data.
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module ModerationModel
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
OMNI_MODERATION_LATEST = :"omni-moderation-latest"
-
1
OMNI_MODERATION_2024_09_26 = :"omni-moderation-2024-09-26"
-
1
TEXT_MODERATION_LATEST = :"text-moderation-latest"
-
1
TEXT_MODERATION_STABLE = :"text-moderation-stable"
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# An object describing an image to classify.
-
1
module ModerationMultiModalInput
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
# An object describing an image to classify.
-
1
variant :image_url, -> { OpenAI::ModerationImageURLInput }
-
-
# An object describing text to classify.
-
1
variant :text, -> { OpenAI::ModerationTextInput }
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::ModerationImageURLInput, OpenAI::Models::ModerationTextInput)]
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
class ModerationTextInput < OpenAI::Internal::Type::BaseModel
-
# @!attribute text
-
# A string of text to classify.
-
#
-
# @return [String]
-
1
required :text, String
-
-
# @!attribute type
-
# Always `text`.
-
#
-
# @return [Symbol, :text]
-
1
required :type, const: :text
-
-
# @!method initialize(text:, type: :text)
-
# An object describing text to classify.
-
#
-
# @param text [String] A string of text to classify.
-
#
-
# @param type [Symbol, :text] Always `text`.
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
class OtherFileChunkingStrategyObject < OpenAI::Internal::Type::BaseModel
-
# @!attribute type
-
# Always `other`.
-
#
-
# @return [Symbol, :other]
-
1
required :type, const: :other
-
-
# @!method initialize(type: :other)
-
# This is returned when the chunking strategy is unknown. Typically, this is
-
# because the file was indexed before the `chunking_strategy` concept was
-
# introduced in the API.
-
#
-
# @param type [Symbol, :other] Always `other`.
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
class Reasoning < OpenAI::Internal::Type::BaseModel
-
# @!attribute effort
-
# **o-series models only**
-
#
-
# Constrains effort on reasoning for
-
# [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
-
# supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
-
# result in faster responses and fewer tokens used on reasoning in a response.
-
#
-
# @return [Symbol, OpenAI::Models::ReasoningEffort, nil]
-
1
optional :effort, enum: -> { OpenAI::ReasoningEffort }, nil?: true
-
-
# @!attribute generate_summary
-
# @deprecated
-
#
-
# **Deprecated:** use `summary` instead.
-
#
-
# A summary of the reasoning performed by the model. This can be useful for
-
# debugging and understanding the model's reasoning process. One of `auto`,
-
# `concise`, or `detailed`.
-
#
-
# @return [Symbol, OpenAI::Models::Reasoning::GenerateSummary, nil]
-
1
optional :generate_summary, enum: -> { OpenAI::Reasoning::GenerateSummary }, nil?: true
-
-
# @!attribute summary
-
# A summary of the reasoning performed by the model. This can be useful for
-
# debugging and understanding the model's reasoning process. One of `auto`,
-
# `concise`, or `detailed`.
-
#
-
# @return [Symbol, OpenAI::Models::Reasoning::Summary, nil]
-
1
optional :summary, enum: -> { OpenAI::Reasoning::Summary }, nil?: true
-
-
# @!method initialize(effort: nil, generate_summary: nil, summary: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Reasoning} for more details.
-
#
-
# **o-series models only**
-
#
-
# Configuration options for
-
# [reasoning models](https://platform.openai.com/docs/guides/reasoning).
-
#
-
# @param effort [Symbol, OpenAI::Models::ReasoningEffort, nil] **o-series models only**
-
#
-
# @param generate_summary [Symbol, OpenAI::Models::Reasoning::GenerateSummary, nil] **Deprecated:** use `summary` instead.
-
#
-
# @param summary [Symbol, OpenAI::Models::Reasoning::Summary, nil] A summary of the reasoning performed by the model. This can be
-
-
# @deprecated
#
# **Deprecated:** use `summary` instead.
#
# A summary of the reasoning performed by the model. This can be useful for
# debugging and understanding the model's reasoning process. One of `auto`,
# `concise`, or `detailed`.
#
# @see OpenAI::Models::Reasoning#generate_summary
module GenerateSummary
  extend OpenAI::Internal::Type::Enum

  # Symbol values accepted by the API for the deprecated
  # `reasoning.generate_summary` request field.
  AUTO = :auto
  CONCISE = :concise
  DETAILED = :detailed

  # @!method self.values
  # @return [Array<Symbol>]
end
-
-
# A summary of the reasoning performed by the model. This can be useful for
# debugging and understanding the model's reasoning process. One of `auto`,
# `concise`, or `detailed`.
#
# @see OpenAI::Models::Reasoning#summary
module Summary
  extend OpenAI::Internal::Type::Enum

  # Symbol values accepted by the API for the `reasoning.summary` request field.
  AUTO = :auto
  CONCISE = :concise
  DETAILED = :detailed

  # @!method self.values
  # @return [Array<Symbol>]
end
-
end
-
end
-
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    # **o-series models only**
    #
    # Constrains effort on reasoning for
    # [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
    # supported values are `low`, `medium`, and `high`. Reducing reasoning effort can
    # result in faster responses and fewer tokens used on reasoning in a response.
    #
    # NOTE(review): declarative generated model code — changes here are likely to be
    # overwritten; verify against the SDK generator before hand-editing.
    module ReasoningEffort
      extend OpenAI::Internal::Type::Enum

      LOW = :low
      MEDIUM = :medium
      HIGH = :high

      # @!method self.values
      # @return [Array<Symbol>]
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    # Legacy, schema-less way to force the model to emit JSON: a single constant
    # `type` tag with no further configuration.
    class ResponseFormatJSONObject < OpenAI::Internal::Type::BaseModel
      # @!attribute type
      # The type of response format being defined. Always `json_object`.
      #
      # @return [Symbol, :json_object]
      required :type, const: :json_object

      # @!method initialize(type: :json_object)
      # JSON object response format. An older method of generating JSON responses. Using
      # `json_schema` is recommended for models that support it. Note that the model
      # will not generate JSON without a system or user message instructing it to do so.
      #
      # @param type [Symbol, :json_object] The type of response format being defined. Always `json_object`.
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    # Structured Outputs response format: a constant `type` tag plus the nested
    # {JSONSchema} configuration that constrains the model's output.
    class ResponseFormatJSONSchema < OpenAI::Internal::Type::BaseModel
      # @!attribute json_schema
      # Structured Outputs configuration options, including a JSON Schema.
      #
      # @return [OpenAI::Models::ResponseFormatJSONSchema::JSONSchema]
      required :json_schema, -> { OpenAI::ResponseFormatJSONSchema::JSONSchema }

      # @!attribute type
      # The type of response format being defined. Always `json_schema`.
      #
      # @return [Symbol, :json_schema]
      required :type, const: :json_schema

      # @!method initialize(json_schema:, type: :json_schema)
      # Some parameter documentation has been truncated, see
      # {OpenAI::Models::ResponseFormatJSONSchema} for more details.
      #
      # JSON Schema response format. Used to generate structured JSON responses. Learn
      # more about
      # [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs).
      #
      # @param json_schema [OpenAI::Models::ResponseFormatJSONSchema::JSONSchema] Structured Outputs configuration options, including a JSON Schema.
      #
      # @param type [Symbol, :json_schema] The type of response format being defined. Always `json_schema`.

      # @see OpenAI::Models::ResponseFormatJSONSchema#json_schema
      class JSONSchema < OpenAI::Internal::Type::BaseModel
        # @!attribute name
        # The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores
        # and dashes, with a maximum length of 64.
        #
        # @return [String]
        required :name, String

        # @!attribute description
        # A description of what the response format is for, used by the model to determine
        # how to respond in the format.
        #
        # @return [String, nil]
        optional :description, String

        # @!attribute schema
        # The schema for the response format, described as a JSON Schema object. Learn how
        # to build JSON schemas [here](https://json-schema.org/).
        #
        # Accepts either a plain Hash or a JsonSchemaConverter (see the union below).
        #
        # @return [Hash{Symbol=>Object}, nil]
        optional :schema,
                 union: -> {
                   OpenAI::UnionOf[
                     OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown],
                     OpenAI::StructuredOutput::JsonSchemaConverter
                   ]
                 }

        # @!attribute strict
        # Whether to enable strict schema adherence when generating the output. If set to
        # true, the model will always follow the exact schema defined in the `schema`
        # field. Only a subset of JSON Schema is supported when `strict` is `true`. To
        # learn more, read the
        # [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
        #
        # @return [Boolean, nil]
        optional :strict, OpenAI::Internal::Type::Boolean, nil?: true

        # @!method initialize(name:, description: nil, schema: nil, strict: nil)
        # Some parameter documentation has been truncated, see
        # {OpenAI::Models::ResponseFormatJSONSchema::JSONSchema} for more details.
        #
        # Structured Outputs configuration options, including a JSON Schema.
        #
        # @param name [String] The name of the response format. Must be a-z, A-Z, 0-9, or contain
        #
        # @param description [String] A description of what the response format is for, used by the model to
        #
        # @param schema [Hash{Symbol=>Object}, OpenAI::StructuredOutput::JsonSchemaConverter] The schema for the response format, described as a JSON Schema object.
        #
        # @param strict [Boolean, nil] Whether to enable strict schema adherence when generating the output.
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    # Default (plain text) response format: a single constant `type` tag.
    class ResponseFormatText < OpenAI::Internal::Type::BaseModel
      # @!attribute type
      # The type of response format being defined. Always `text`.
      #
      # @return [Symbol, :text]
      required :type, const: :text

      # @!method initialize(type: :text)
      # Default response format. Used to generate text responses.
      #
      # @param type [Symbol, :text] The type of response format being defined. Always `text`.
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Responses
      # Declarative model for the `computer_use_preview` tool: display dimensions
      # plus the {Environment} the virtual computer runs in.
      class ComputerTool < OpenAI::Internal::Type::BaseModel
        # @!attribute display_height
        # The height of the computer display.
        #
        # @return [Integer]
        required :display_height, Integer

        # @!attribute display_width
        # The width of the computer display.
        #
        # @return [Integer]
        required :display_width, Integer

        # @!attribute environment
        # The type of computer environment to control.
        #
        # @return [Symbol, OpenAI::Models::Responses::ComputerTool::Environment]
        required :environment, enum: -> { OpenAI::Responses::ComputerTool::Environment }

        # @!attribute type
        # The type of the computer use tool. Always `computer_use_preview`.
        #
        # @return [Symbol, :computer_use_preview]
        required :type, const: :computer_use_preview

        # @!method initialize(display_height:, display_width:, environment:, type: :computer_use_preview)
        # A tool that controls a virtual computer. Learn more about the
        # [computer tool](https://platform.openai.com/docs/guides/tools-computer-use).
        #
        # @param display_height [Integer] The height of the computer display.
        #
        # @param display_width [Integer] The width of the computer display.
        #
        # @param environment [Symbol, OpenAI::Models::Responses::ComputerTool::Environment] The type of computer environment to control.
        #
        # @param type [Symbol, :computer_use_preview] The type of the computer use tool. Always `computer_use_preview`.

        # The type of computer environment to control.
        #
        # @see OpenAI::Models::Responses::ComputerTool#environment
        module Environment
          extend OpenAI::Internal::Type::Enum

          WINDOWS = :windows
          MAC = :mac
          LINUX = :linux
          UBUNTU = :ubuntu
          BROWSER = :browser

          # @!method self.values
          # @return [Array<Symbol>]
        end
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Responses
      # A single input message with a role; `content` may be a bare String or a
      # list of typed input items (see the {Content} union below).
      class EasyInputMessage < OpenAI::Internal::Type::BaseModel
        # @!attribute content
        # Text, image, or audio input to the model, used to generate a response. Can also
        # contain previous assistant responses.
        #
        # @return [String, Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile>]
        required :content, union: -> { OpenAI::Responses::EasyInputMessage::Content }

        # @!attribute role
        # The role of the message input. One of `user`, `assistant`, `system`, or
        # `developer`.
        #
        # @return [Symbol, OpenAI::Models::Responses::EasyInputMessage::Role]
        required :role, enum: -> { OpenAI::Responses::EasyInputMessage::Role }

        # @!attribute type
        # The type of the message input. Always `message`.
        #
        # @return [Symbol, OpenAI::Models::Responses::EasyInputMessage::Type, nil]
        optional :type, enum: -> { OpenAI::Responses::EasyInputMessage::Type }

        # @!method initialize(content:, role:, type: nil)
        # Some parameter documentation has been truncated, see
        # {OpenAI::Models::Responses::EasyInputMessage} for more details.
        #
        # A message input to the model with a role indicating instruction following
        # hierarchy. Instructions given with the `developer` or `system` role take
        # precedence over instructions given with the `user` role. Messages with the
        # `assistant` role are presumed to have been generated by the model in previous
        # interactions.
        #
        # @param content [String, Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile>] Text, image, or audio input to the model, used to generate a response.
        #
        # @param role [Symbol, OpenAI::Models::Responses::EasyInputMessage::Role] The role of the message input. One of `user`, `assistant`, `system`, or
        #
        # @param type [Symbol, OpenAI::Models::Responses::EasyInputMessage::Type] The type of the message input. Always `message`.

        # Text, image, or audio input to the model, used to generate a response. Can also
        # contain previous assistant responses.
        #
        # @see OpenAI::Models::Responses::EasyInputMessage#content
        module Content
          extend OpenAI::Internal::Type::Union

          # A text input to the model.
          variant String

          # A list of one or many input items to the model, containing different content
          # types.
          variant -> { OpenAI::Responses::ResponseInputMessageContentList }

          # @!method self.variants
          # @return [Array(String, Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile>)]
        end

        # The role of the message input. One of `user`, `assistant`, `system`, or
        # `developer`.
        #
        # @see OpenAI::Models::Responses::EasyInputMessage#role
        module Role
          extend OpenAI::Internal::Type::Enum

          USER = :user
          ASSISTANT = :assistant
          SYSTEM = :system
          DEVELOPER = :developer

          # @!method self.values
          # @return [Array<Symbol>]
        end

        # The type of the message input. Always `message`.
        #
        # @see OpenAI::Models::Responses::EasyInputMessage#type
        module Type
          extend OpenAI::Internal::Type::Enum

          MESSAGE = :message

          # @!method self.values
          # @return [Array<Symbol>]
        end
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Responses
      # Declarative model for the `file_search` tool: vector store targets plus
      # optional filters and ranking configuration.
      class FileSearchTool < OpenAI::Internal::Type::BaseModel
        # @!attribute type
        # The type of the file search tool. Always `file_search`.
        #
        # @return [Symbol, :file_search]
        required :type, const: :file_search

        # @!attribute vector_store_ids
        # The IDs of the vector stores to search.
        #
        # @return [Array<String>]
        required :vector_store_ids, OpenAI::Internal::Type::ArrayOf[String]

        # @!attribute filters
        # A filter to apply.
        #
        # @return [OpenAI::Models::ComparisonFilter, OpenAI::Models::CompoundFilter, nil]
        optional :filters, union: -> { OpenAI::Responses::FileSearchTool::Filters }, nil?: true

        # @!attribute max_num_results
        # The maximum number of results to return. This number should be between 1 and 50
        # inclusive.
        #
        # @return [Integer, nil]
        optional :max_num_results, Integer

        # @!attribute ranking_options
        # Ranking options for search.
        #
        # @return [OpenAI::Models::Responses::FileSearchTool::RankingOptions, nil]
        optional :ranking_options, -> { OpenAI::Responses::FileSearchTool::RankingOptions }

        # @!method initialize(vector_store_ids:, filters: nil, max_num_results: nil, ranking_options: nil, type: :file_search)
        # Some parameter documentation has been truncated, see
        # {OpenAI::Models::Responses::FileSearchTool} for more details.
        #
        # A tool that searches for relevant content from uploaded files. Learn more about
        # the
        # [file search tool](https://platform.openai.com/docs/guides/tools-file-search).
        #
        # @param vector_store_ids [Array<String>] The IDs of the vector stores to search.
        #
        # @param filters [OpenAI::Models::ComparisonFilter, OpenAI::Models::CompoundFilter, nil] A filter to apply.
        #
        # @param max_num_results [Integer] The maximum number of results to return. This number should be between 1 and 50
        #
        # @param ranking_options [OpenAI::Models::Responses::FileSearchTool::RankingOptions] Ranking options for search.
        #
        # @param type [Symbol, :file_search] The type of the file search tool. Always `file_search`.

        # A filter to apply.
        #
        # @see OpenAI::Models::Responses::FileSearchTool#filters
        module Filters
          extend OpenAI::Internal::Type::Union

          # A filter used to compare a specified attribute key to a given value using a defined comparison operation.
          variant -> { OpenAI::ComparisonFilter }

          # Combine multiple filters using `and` or `or`.
          variant -> { OpenAI::CompoundFilter }

          # @!method self.variants
          # @return [Array(OpenAI::Models::ComparisonFilter, OpenAI::Models::CompoundFilter)]
        end

        # @see OpenAI::Models::Responses::FileSearchTool#ranking_options
        class RankingOptions < OpenAI::Internal::Type::BaseModel
          # @!attribute ranker
          # The ranker to use for the file search.
          #
          # @return [Symbol, OpenAI::Models::Responses::FileSearchTool::RankingOptions::Ranker, nil]
          optional :ranker, enum: -> { OpenAI::Responses::FileSearchTool::RankingOptions::Ranker }

          # @!attribute score_threshold
          # The score threshold for the file search, a number between 0 and 1. Numbers
          # closer to 1 will attempt to return only the most relevant results, but may
          # return fewer results.
          #
          # @return [Float, nil]
          optional :score_threshold, Float

          # @!method initialize(ranker: nil, score_threshold: nil)
          # Some parameter documentation has been truncated, see
          # {OpenAI::Models::Responses::FileSearchTool::RankingOptions} for more details.
          #
          # Ranking options for search.
          #
          # @param ranker [Symbol, OpenAI::Models::Responses::FileSearchTool::RankingOptions::Ranker] The ranker to use for the file search.
          #
          # @param score_threshold [Float] The score threshold for the file search, a number between 0 and 1. Numbers close
          #
          # (Parameter doc above is truncated by the doc generator.)

          # The ranker to use for the file search.
          #
          # @see OpenAI::Models::Responses::FileSearchTool::RankingOptions#ranker
          module Ranker
            extend OpenAI::Internal::Type::Enum

            AUTO = :auto
            DEFAULT_2024_11_15 = :"default-2024-11-15"

            # @!method self.values
            # @return [Array<Symbol>]
          end
        end
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Responses
      # Declarative model for a user-defined `function` tool: name, JSON-Schema
      # parameters, strictness flag, and optional description.
      class FunctionTool < OpenAI::Internal::Type::BaseModel
        # @!attribute name
        # The name of the function to call.
        #
        # @return [String]
        required :name, String

        # @!attribute parameters
        # A JSON schema object describing the parameters of the function.
        #
        # Accepts either a plain Hash or a JsonSchemaConverter; may also be nil.
        #
        # @return [Hash{Symbol=>Object}, nil]
        required :parameters,
                 union: OpenAI::UnionOf[
                   OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown],
                   OpenAI::StructuredOutput::JsonSchemaConverter
                 ],
                 nil?: true

        # @!attribute strict
        # Whether to enforce strict parameter validation. Default `true`.
        #
        # @return [Boolean, nil]
        required :strict, OpenAI::Internal::Type::Boolean, nil?: true

        # @!attribute type
        # The type of the function tool. Always `function`.
        #
        # @return [Symbol, :function]
        required :type, const: :function

        # @!attribute description
        # A description of the function. Used by the model to determine whether or not to
        # call the function.
        #
        # @return [String, nil]
        optional :description, String, nil?: true

        # @!method initialize(name:, parameters:, strict:, description: nil, type: :function)
        # Some parameter documentation has been truncated, see
        # {OpenAI::Models::Responses::FunctionTool} for more details.
        #
        # Defines a function in your own code the model can choose to call. Learn more
        # about
        # [function calling](https://platform.openai.com/docs/guides/function-calling).
        #
        # @param name [String] The name of the function to call.
        #
        # @param parameters [Hash{Symbol=>Object}, nil] A JSON schema object describing the parameters of the function.
        #
        # @param strict [Boolean, nil] Whether to enforce strict parameter validation. Default `true`.
        #
        # @param description [String, nil] A description of the function. Used by the model to determine whether or not to
        #
        # @param type [Symbol, :function] The type of the function tool. Always `function`.
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Responses
      # Query parameters for listing a response's input items (cursor pagination
      # via `after`/`before`, plus `include`, `limit`, and sort `order`).
      #
      # @see OpenAI::Resources::Responses::InputItems#list
      class InputItemListParams < OpenAI::Internal::Type::BaseModel
        extend OpenAI::Internal::Type::RequestParameters::Converter
        include OpenAI::Internal::Type::RequestParameters

        # @!attribute after
        # An item ID to list items after, used in pagination.
        #
        # @return [String, nil]
        optional :after, String

        # @!attribute before
        # An item ID to list items before, used in pagination.
        #
        # @return [String, nil]
        optional :before, String

        # @!attribute include
        # Additional fields to include in the response. See the `include` parameter for
        # Response creation above for more information.
        #
        # @return [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>, nil]
        optional :include, -> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Responses::ResponseIncludable] }

        # @!attribute limit
        # A limit on the number of objects to be returned. Limit can range between 1 and
        # 100, and the default is 20.
        #
        # @return [Integer, nil]
        optional :limit, Integer

        # @!attribute order
        # The order to return the input items in. Default is `desc`.
        #
        # - `asc`: Return the input items in ascending order.
        # - `desc`: Return the input items in descending order.
        #
        # @return [Symbol, OpenAI::Models::Responses::InputItemListParams::Order, nil]
        optional :order, enum: -> { OpenAI::Responses::InputItemListParams::Order }

        # @!method initialize(after: nil, before: nil, include: nil, limit: nil, order: nil, request_options: {})
        # Some parameter documentation has been truncated, see
        # {OpenAI::Models::Responses::InputItemListParams} for more details.
        #
        # @param after [String] An item ID to list items after, used in pagination.
        #
        # @param before [String] An item ID to list items before, used in pagination.
        #
        # @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>] Additional fields to include in the response. See the `include`
        #
        # @param limit [Integer] A limit on the number of objects to be returned. Limit can range between
        #
        # @param order [Symbol, OpenAI::Models::Responses::InputItemListParams::Order] The order to return the input items in. Default is `desc`.
        #
        # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]

        # The order to return the input items in. Default is `desc`.
        #
        # - `asc`: Return the input items in ascending order.
        # - `desc`: Return the input items in descending order.
        module Order
          extend OpenAI::Internal::Type::Enum

          ASC = :asc
          DESC = :desc

          # @!method self.values
          # @return [Array<Symbol>]
        end
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
# @see OpenAI::Resources::Responses#create
-
#
-
# @see OpenAI::Resources::Responses#stream_raw
-
1
class Response < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# Unique identifier for this Response.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute created_at
-
# Unix timestamp (in seconds) of when this Response was created.
-
#
-
# @return [Float]
-
1
required :created_at, Float
-
-
# @!attribute error
-
# An error object returned when the model fails to generate a Response.
-
#
-
# @return [OpenAI::Models::Responses::ResponseError, nil]
-
1
required :error, -> { OpenAI::Responses::ResponseError }, nil?: true
-
-
# @!attribute incomplete_details
-
# Details about why the response is incomplete.
-
#
-
# @return [OpenAI::Models::Responses::Response::IncompleteDetails, nil]
-
1
required :incomplete_details, -> { OpenAI::Responses::Response::IncompleteDetails }, nil?: true
-
-
# @!attribute instructions
-
# A system (or developer) message inserted into the model's context.
-
#
-
# When using along with `previous_response_id`, the instructions from a previous
-
# response will not be carried over to the next response. This makes it simple to
-
# swap out system (or developer) messages in new responses.
-
#
-
# @return [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil]
-
1
required :instructions, union: -> { OpenAI::Responses::Response::Instructions }, nil?: true
-
-
# @!attribute metadata
-
# Set of 16 key-value pairs that can be attached to an object. This can be useful
-
# for storing additional information about the object in a structured format, and
-
# querying for objects via API or the dashboard.
-
#
-
# Keys are strings with a maximum length of 64 characters. Values are strings with
-
# a maximum length of 512 characters.
-
#
-
# @return [Hash{Symbol=>String}, nil]
-
1
required :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true
-
-
# @!attribute model
-
# Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
-
# wide range of models with different capabilities, performance characteristics,
-
# and price points. Refer to the
-
# [model guide](https://platform.openai.com/docs/models) to browse and compare
-
# available models.
-
#
-
# @return [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel::ResponsesOnlyModel]
-
1
required :model, union: -> { OpenAI::ResponsesModel }
-
-
# @!attribute object
-
# The object type of this resource - always set to `response`.
-
#
-
# @return [Symbol, :response]
-
1
required :object, const: :response
-
-
# @!attribute output
-
# An array of content items generated by the model.
-
#
-
# - The length and order of items in the `output` array is dependent on the
-
# model's response.
-
# - Rather than accessing the first item in the `output` array and assuming it's
-
# an `assistant` message with the content generated by the model, you might
-
# consider using the `output_text` property where supported in SDKs.
-
#
-
# @return [Array<OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest>]
-
1
required :output, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseOutputItem] }
-
-
# @!attribute parallel_tool_calls
-
# Whether to allow the model to run tool calls in parallel.
-
#
-
# @return [Boolean]
-
1
required :parallel_tool_calls, OpenAI::Internal::Type::Boolean
-
-
# @!attribute temperature
-
# What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
-
# make the output more random, while lower values like 0.2 will make it more
-
# focused and deterministic. We generally recommend altering this or `top_p` but
-
# not both.
-
#
-
# @return [Float, nil]
-
1
required :temperature, Float, nil?: true
-
-
# @!attribute tool_choice
-
# How the model should select which tool (or tools) to use when generating a
-
# response. See the `tools` parameter to see how to specify which tools the model
-
# can call.
-
#
-
# @return [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp]
-
1
required :tool_choice, union: -> { OpenAI::Responses::Response::ToolChoice }
-
-
# @!attribute tools
-
# An array of tools the model may call while generating a response. You can
-
# specify which tool to use by setting the `tool_choice` parameter.
-
#
-
# The two categories of tools you can provide the model are:
-
#
-
# - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
-
# capabilities, like
-
# [web search](https://platform.openai.com/docs/guides/tools-web-search) or
-
# [file search](https://platform.openai.com/docs/guides/tools-file-search).
-
# Learn more about
-
# [built-in tools](https://platform.openai.com/docs/guides/tools).
-
# - **Function calls (custom tools)**: Functions that are defined by you, enabling
-
# the model to call your own code. Learn more about
-
# [function calling](https://platform.openai.com/docs/guides/function-calling).
-
#
-
# @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>]
-
1
required :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] }
-
-
# @!attribute top_p
-
# An alternative to sampling with temperature, called nucleus sampling, where the
-
# model considers the results of the tokens with top_p probability mass. So 0.1
-
# means only the tokens comprising the top 10% probability mass are considered.
-
#
-
# We generally recommend altering this or `temperature` but not both.
-
#
-
# @return [Float, nil]
-
1
required :top_p, Float, nil?: true
-
-
# @!attribute background
-
# Whether to run the model response in the background.
-
# [Learn more](https://platform.openai.com/docs/guides/background).
-
#
-
# @return [Boolean, nil]
-
1
optional :background, OpenAI::Internal::Type::Boolean, nil?: true
-
-
# @!attribute max_output_tokens
-
# An upper bound for the number of tokens that can be generated for a response,
-
# including visible output tokens and
-
# [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
-
#
-
# @return [Integer, nil]
-
1
optional :max_output_tokens, Integer, nil?: true
-
-
# @!attribute max_tool_calls
-
# The maximum number of total calls to built-in tools that can be processed in a
-
# response. This maximum number applies across all built-in tool calls, not per
-
# individual tool. Any further attempts to call a tool by the model will be
-
# ignored.
-
#
-
# @return [Integer, nil]
-
1
optional :max_tool_calls, Integer, nil?: true
-
-
# @!attribute previous_response_id
-
# The unique ID of the previous response to the model. Use this to create
-
# multi-turn conversations. Learn more about
-
# [conversation state](https://platform.openai.com/docs/guides/conversation-state).
-
#
-
# @return [String, nil]
-
1
optional :previous_response_id, String, nil?: true
-
-
# @!attribute prompt
-
# Reference to a prompt template and its variables.
-
# [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
-
#
-
# @return [OpenAI::Models::Responses::ResponsePrompt, nil]
-
1
optional :prompt, -> { OpenAI::Responses::ResponsePrompt }, nil?: true
-
-
# @!attribute prompt_cache_key
-
# Used by OpenAI to cache responses for similar requests to optimize your cache
-
# hit rates. Replaces the `user` field.
-
# [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
-
#
-
# @return [String, nil]
-
1
optional :prompt_cache_key, String
-
-
# @!attribute reasoning
-
# **o-series models only**
-
#
-
# Configuration options for
-
# [reasoning models](https://platform.openai.com/docs/guides/reasoning).
-
#
-
# @return [OpenAI::Models::Reasoning, nil]
-
1
optional :reasoning, -> { OpenAI::Reasoning }, nil?: true
-
-
# @!attribute safety_identifier
-
# A stable identifier used to help detect users of your application that may be
-
# violating OpenAI's usage policies. The IDs should be a string that uniquely
-
# identifies each user. We recommend hashing their username or email address, in
-
# order to avoid sending us any identifying information.
-
# [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
-
#
-
# @return [String, nil]
-
1
optional :safety_identifier, String
-
-
# @!attribute service_tier
-
# Specifies the processing type used for serving the request.
-
#
-
# - If set to 'auto', then the request will be processed with the service tier
-
# configured in the Project settings. Unless otherwise configured, the Project
-
# will use 'default'.
-
# - If set to 'default', then the request will be processed with the standard
-
# pricing and performance for the selected model.
-
# - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
-
# 'priority', then the request will be processed with the corresponding service
-
# tier. [Contact sales](https://openai.com/contact-sales) to learn more about
-
# Priority processing.
-
# - When not set, the default behavior is 'auto'.
-
#
-
# When the `service_tier` parameter is set, the response body will include the
-
# `service_tier` value based on the processing mode actually used to serve the
-
# request. This response value may be different from the value set in the
-
# parameter.
-
#
-
# @return [Symbol, OpenAI::Models::Responses::Response::ServiceTier, nil]
-
1
optional :service_tier, enum: -> { OpenAI::Responses::Response::ServiceTier }, nil?: true
-
-
# @!attribute status
-
# The status of the response generation. One of `completed`, `failed`,
-
# `in_progress`, `cancelled`, `queued`, or `incomplete`.
-
#
-
# @return [Symbol, OpenAI::Models::Responses::ResponseStatus, nil]
-
1
optional :status, enum: -> { OpenAI::Responses::ResponseStatus }
-
-
# @!attribute text
-
# Configuration options for a text response from the model. Can be plain text or
-
# structured JSON data. Learn more:
-
#
-
# - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
-
# - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
-
#
-
# @return [OpenAI::Models::Responses::ResponseTextConfig, nil]
-
1
optional :text, -> { OpenAI::Responses::ResponseTextConfig }
-
-
# @!attribute top_logprobs
-
# An integer between 0 and 20 specifying the number of most likely tokens to
-
# return at each token position, each with an associated log probability.
-
#
-
# @return [Integer, nil]
-
1
optional :top_logprobs, Integer, nil?: true
-
-
# @!attribute truncation
-
# The truncation strategy to use for the model response.
-
#
-
# - `auto`: If the context of this response and previous ones exceeds the model's
-
# context window size, the model will truncate the response to fit the context
-
# window by dropping input items in the middle of the conversation.
-
# - `disabled` (default): If a model response will exceed the context window size
-
# for a model, the request will fail with a 400 error.
-
#
-
# @return [Symbol, OpenAI::Models::Responses::Response::Truncation, nil]
-
1
optional :truncation, enum: -> { OpenAI::Responses::Response::Truncation }, nil?: true
-
-
# @!attribute usage
-
# Represents token usage details including input tokens, output tokens, a
-
# breakdown of output tokens, and the total tokens used.
-
#
-
# @return [OpenAI::Models::Responses::ResponseUsage, nil]
-
1
optional :usage, -> { OpenAI::Responses::ResponseUsage }
-
-
# @!attribute user
-
# @deprecated
-
#
-
# This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
-
# `prompt_cache_key` instead to maintain caching optimizations. A stable
-
# identifier for your end-users. Used to boost cache hit rates by better bucketing
-
# similar requests and to help OpenAI detect and prevent abuse.
-
# [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
-
#
-
# @return [String, nil]
-
1
optional :user, String
-
-
# Convenience property that aggregates all `output_text` items from the `output` list.
#
# If no `output_text` content blocks exist, then an empty string is returned.
#
# @return [String]
def output_text
  # Walk only assistant `message` items, flatten their content blocks, and
  # keep just the text of `output_text` blocks, concatenated in order.
  output
    .select { |item| item.type == :message }
    .flat_map(&:content)
    .filter_map { |block| block.text if block.type == :output_text }
    .join
end
-
-
# @!method initialize(id:, created_at:, error:, incomplete_details:, instructions:, metadata:, model:, output:, parallel_tool_calls:, temperature:, tool_choice:, tools:, top_p:, background: nil, max_output_tokens: nil, max_tool_calls: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, status: nil, text: nil, top_logprobs: nil, truncation: nil, usage: nil, user: nil, object: :response)
-
# Some parameter documentation has been truncated; see
# {OpenAI::Models::Responses::Response} for more details.
-
#
-
# @param id [String] Unique identifier for this Response.
-
#
-
# @param created_at [Float] Unix timestamp (in seconds) of when this Response was created.
-
#
-
# @param error [OpenAI::Models::Responses::ResponseError, nil] An error object returned when the model fails to generate a Response.
-
#
-
# @param incomplete_details [OpenAI::Models::Responses::Response::IncompleteDetails, nil] Details about why the response is incomplete.
-
#
-
# @param instructions [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil] A system (or developer) message inserted into the model's context.
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
#
-
# @param model [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel::ResponsesOnlyModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
-
#
-
# @param output [Array<OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest>] An array of content items generated by the model.
-
#
-
# @param parallel_tool_calls [Boolean] Whether to allow the model to run tool calls in parallel.
-
#
-
# @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
-
#
-
# @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp] How the model should select which tool (or tools) to use when generating
-
#
-
# @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
-
#
-
# @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling,
-
#
-
# @param background [Boolean, nil] Whether to run the model response in the background.
-
#
-
# @param max_output_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a response, in
-
#
-
# @param max_tool_calls [Integer, nil] The maximum number of total calls to built-in tools that can be processed in a r
-
#
-
# @param previous_response_id [String, nil] The unique ID of the previous response to the model. Use this to
-
#
-
# @param prompt [OpenAI::Models::Responses::ResponsePrompt, nil] Reference to a prompt template and its variables.
-
#
-
# @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi
-
#
-
# @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
-
#
-
# @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi
-
#
-
# @param service_tier [Symbol, OpenAI::Models::Responses::Response::ServiceTier, nil] Specifies the processing type used for serving the request.
-
#
-
# @param status [Symbol, OpenAI::Models::Responses::ResponseStatus] The status of the response generation. One of `completed`, `failed`,
-
#
-
# @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. Can be plain
-
#
-
# @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to
-
#
-
# @param truncation [Symbol, OpenAI::Models::Responses::Response::Truncation, nil] The truncation strategy to use for the model response.
-
#
-
# @param usage [OpenAI::Models::Responses::ResponseUsage] Represents token usage details including input tokens, output tokens,
-
#
-
# @param user [String] This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
-
#
-
# @param object [Symbol, :response] The object type of this resource - always set to `response`.
-
-
# @see OpenAI::Models::Responses::Response#incomplete_details
class IncompleteDetails < OpenAI::Internal::Type::BaseModel
  # @!attribute reason
  #   The reason why the response is incomplete.
  #
  #   @return [Symbol, OpenAI::Models::Responses::Response::IncompleteDetails::Reason, nil]
  optional :reason, enum: -> { OpenAI::Responses::Response::IncompleteDetails::Reason }

  # @!method initialize(reason: nil)
  #   Details about why the response is incomplete.
  #
  #   @param reason [Symbol, OpenAI::Models::Responses::Response::IncompleteDetails::Reason] The reason why the response is incomplete.

  # The reason why the response is incomplete.
  #
  # @see OpenAI::Models::Responses::Response::IncompleteDetails#reason
  module Reason
    extend OpenAI::Internal::Type::Enum

    MAX_OUTPUT_TOKENS = :max_output_tokens
    CONTENT_FILTER = :content_filter

    # @!method self.values
    #   @return [Array<Symbol>]
  end
end
-
-
# A system (or developer) message inserted into the model's context.
-
#
-
# When using along with `previous_response_id`, the instructions from a previous
-
# response will not be carried over to the next response. This makes it simple to
-
# swap out system (or developer) messages in new responses.
-
#
-
# @see OpenAI::Models::Responses::Response#instructions
-
1
module Instructions
-
1
extend OpenAI::Internal::Type::Union
-
-
# A text input to the model, equivalent to a text input with the
-
# `developer` role.
-
1
variant String
-
-
# A list of one or many input items to the model, containing
-
# different content types.
-
1
variant -> { OpenAI::Models::Responses::Response::Instructions::ResponseInputItemArray }
-
-
# @!method self.variants
-
# @return [Array(String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>)]
-
-
# @type [OpenAI::Internal::Type::Converter]
-
ResponseInputItemArray =
-
1
OpenAI::Internal::Type::ArrayOf[union: -> { OpenAI::Responses::ResponseInputItem }]
-
end
-
-
# How the model should select which tool (or tools) to use when generating a
-
# response. See the `tools` parameter to see how to specify which tools the model
-
# can call.
-
#
-
# @see OpenAI::Models::Responses::Response#tool_choice
-
1
module ToolChoice
-
1
extend OpenAI::Internal::Type::Union
-
-
# Controls which (if any) tool is called by the model.
-
#
-
# `none` means the model will not call any tool and instead generates a message.
-
#
-
# `auto` means the model can pick between generating a message or calling one or
-
# more tools.
-
#
-
# `required` means the model must call one or more tools.
-
1
variant enum: -> { OpenAI::Responses::ToolChoiceOptions }
-
-
# Indicates that the model should use a built-in tool to generate a response.
-
# [Learn more about built-in tools](https://platform.openai.com/docs/guides/tools).
-
1
variant -> { OpenAI::Responses::ToolChoiceTypes }
-
-
# Use this option to force the model to call a specific function.
-
1
variant -> { OpenAI::Responses::ToolChoiceFunction }
-
-
# Use this option to force the model to call a specific tool on a remote MCP server.
-
1
variant -> { OpenAI::Responses::ToolChoiceMcp }
-
-
# @!method self.variants
-
# @return [Array(Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp)]
-
end
-
-
# Specifies the processing type used for serving the request.
-
#
-
# - If set to 'auto', then the request will be processed with the service tier
-
# configured in the Project settings. Unless otherwise configured, the Project
-
# will use 'default'.
-
# - If set to 'default', then the request will be processed with the standard
-
# pricing and performance for the selected model.
-
# - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
-
# 'priority', then the request will be processed with the corresponding service
-
# tier. [Contact sales](https://openai.com/contact-sales) to learn more about
-
# Priority processing.
-
# - When not set, the default behavior is 'auto'.
-
#
-
# When the `service_tier` parameter is set, the response body will include the
-
# `service_tier` value based on the processing mode actually used to serve the
-
# request. This response value may be different from the value set in the
-
# parameter.
-
#
-
# @see OpenAI::Models::Responses::Response#service_tier
-
1
module ServiceTier
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
AUTO = :auto
-
1
DEFAULT = :default
-
1
FLEX = :flex
-
1
SCALE = :scale
-
1
PRIORITY = :priority
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
# The truncation strategy to use for the model response.
-
#
-
# - `auto`: If the context of this response and previous ones exceeds the model's
-
# context window size, the model will truncate the response to fit the context
-
# window by dropping input items in the middle of the conversation.
-
# - `disabled` (default): If a model response will exceed the context window size
-
# for a model, the request will fail with a 400 error.
-
#
-
# @see OpenAI::Models::Responses::Response#truncation
-
1
module Truncation
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
AUTO = :auto
-
1
DISABLED = :disabled
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Responses
      class ResponseAudioDeltaEvent < OpenAI::Internal::Type::BaseModel
        # @!attribute delta
        #   A chunk of Base64 encoded response audio bytes.
        #
        #   @return [String]
        required :delta, String

        # @!attribute sequence_number
        #   A sequence number for this chunk of the stream response.
        #
        #   @return [Integer]
        required :sequence_number, Integer

        # @!attribute type
        #   The type of the event. Always `response.audio.delta`.
        #
        #   @return [Symbol, :"response.audio.delta"]
        required :type, const: :"response.audio.delta"

        # @!method initialize(delta:, sequence_number:, type: :"response.audio.delta")
        #   Some parameter documentation has been truncated; see
        #   {OpenAI::Models::Responses::ResponseAudioDeltaEvent} for more details.
        #
        #   Emitted when there is a partial audio response.
        #
        #   @param delta [String] A chunk of Base64 encoded response audio bytes.
        #
        #   @param sequence_number [Integer] A sequence number for this chunk of the stream response.
        #
        #   @param type [Symbol, :"response.audio.delta"] The type of the event. Always `response.audio.delta`.
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Responses
      class ResponseAudioDoneEvent < OpenAI::Internal::Type::BaseModel
        # @!attribute sequence_number
        #   The sequence number of the delta.
        #
        #   @return [Integer]
        required :sequence_number, Integer

        # @!attribute type
        #   The type of the event. Always `response.audio.done`.
        #
        #   @return [Symbol, :"response.audio.done"]
        required :type, const: :"response.audio.done"

        # @!method initialize(sequence_number:, type: :"response.audio.done")
        #   Some parameter documentation has been truncated; see
        #   {OpenAI::Models::Responses::ResponseAudioDoneEvent} for more details.
        #
        #   Emitted when the audio response is complete.
        #
        #   @param sequence_number [Integer] The sequence number of the delta.
        #
        #   @param type [Symbol, :"response.audio.done"] The type of the event. Always `response.audio.done`.
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Responses
      class ResponseAudioTranscriptDeltaEvent < OpenAI::Internal::Type::BaseModel
        # @!attribute delta
        #   The partial transcript of the audio response.
        #
        #   @return [String]
        required :delta, String

        # @!attribute sequence_number
        #   The sequence number of this event.
        #
        #   @return [Integer]
        required :sequence_number, Integer

        # @!attribute type
        #   The type of the event. Always `response.audio.transcript.delta`.
        #
        #   @return [Symbol, :"response.audio.transcript.delta"]
        required :type, const: :"response.audio.transcript.delta"

        # @!method initialize(delta:, sequence_number:, type: :"response.audio.transcript.delta")
        #   Some parameter documentation has been truncated; see
        #   {OpenAI::Models::Responses::ResponseAudioTranscriptDeltaEvent} for more details.
        #
        #   Emitted when there is a partial transcript of audio.
        #
        #   @param delta [String] The partial transcript of the audio response.
        #
        #   @param sequence_number [Integer] The sequence number of this event.
        #
        #   @param type [Symbol, :"response.audio.transcript.delta"] The type of the event. Always `response.audio.transcript.delta`.
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Responses
      class ResponseAudioTranscriptDoneEvent < OpenAI::Internal::Type::BaseModel
        # @!attribute sequence_number
        #   The sequence number of this event.
        #
        #   @return [Integer]
        required :sequence_number, Integer

        # @!attribute type
        #   The type of the event. Always `response.audio.transcript.done`.
        #
        #   @return [Symbol, :"response.audio.transcript.done"]
        required :type, const: :"response.audio.transcript.done"

        # @!method initialize(sequence_number:, type: :"response.audio.transcript.done")
        #   Some parameter documentation has been truncated; see
        #   {OpenAI::Models::Responses::ResponseAudioTranscriptDoneEvent} for more details.
        #
        #   Emitted when the full audio transcript is completed.
        #
        #   @param sequence_number [Integer] The sequence number of this event.
        #
        #   @param type [Symbol, :"response.audio.transcript.done"] The type of the event. Always `response.audio.transcript.done`.
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Responses
      # Request parameters for cancelling an in-progress response.
      # Carries no body fields of its own — only the shared request options.
      #
      # @see OpenAI::Resources::Responses#cancel
      class ResponseCancelParams < OpenAI::Internal::Type::BaseModel
        extend OpenAI::Internal::Type::RequestParameters::Converter
        include OpenAI::Internal::Type::RequestParameters

        # @!method initialize(request_options: {})
        #   @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Responses
      class ResponseCodeInterpreterCallCodeDeltaEvent < OpenAI::Internal::Type::BaseModel
        # @!attribute delta
        #   The partial code snippet being streamed by the code interpreter.
        #
        #   @return [String]
        required :delta, String

        # @!attribute item_id
        #   The unique identifier of the code interpreter tool call item.
        #
        #   @return [String]
        required :item_id, String

        # @!attribute output_index
        #   The index of the output item in the response for which the code is being
        #   streamed.
        #
        #   @return [Integer]
        required :output_index, Integer

        # @!attribute sequence_number
        #   The sequence number of this event, used to order streaming events.
        #
        #   @return [Integer]
        required :sequence_number, Integer

        # @!attribute type
        #   The type of the event. Always `response.code_interpreter_call_code.delta`.
        #
        #   @return [Symbol, :"response.code_interpreter_call_code.delta"]
        required :type, const: :"response.code_interpreter_call_code.delta"

        # @!method initialize(delta:, item_id:, output_index:, sequence_number:, type: :"response.code_interpreter_call_code.delta")
        #   Some parameter documentation has been truncated; see
        #   {OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent} for more
        #   details.
        #
        #   Emitted when a partial code snippet is streamed by the code interpreter.
        #
        #   @param delta [String] The partial code snippet being streamed by the code interpreter.
        #
        #   @param item_id [String] The unique identifier of the code interpreter tool call item.
        #
        #   @param output_index [Integer] The index of the output item in the response for which the code is being streamed
        #
        #   @param sequence_number [Integer] The sequence number of this event, used to order streaming events.
        #
        #   @param type [Symbol, :"response.code_interpreter_call_code.delta"] The type of the event. Always `response.code_interpreter_call_code.delta`.
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Responses
      class ResponseCodeInterpreterCallCodeDoneEvent < OpenAI::Internal::Type::BaseModel
        # @!attribute code
        #   The final code snippet output by the code interpreter.
        #
        #   @return [String]
        required :code, String

        # @!attribute item_id
        #   The unique identifier of the code interpreter tool call item.
        #
        #   @return [String]
        required :item_id, String

        # @!attribute output_index
        #   The index of the output item in the response for which the code is finalized.
        #
        #   @return [Integer]
        required :output_index, Integer

        # @!attribute sequence_number
        #   The sequence number of this event, used to order streaming events.
        #
        #   @return [Integer]
        required :sequence_number, Integer

        # @!attribute type
        #   The type of the event. Always `response.code_interpreter_call_code.done`.
        #
        #   @return [Symbol, :"response.code_interpreter_call_code.done"]
        required :type, const: :"response.code_interpreter_call_code.done"

        # @!method initialize(code:, item_id:, output_index:, sequence_number:, type: :"response.code_interpreter_call_code.done")
        #   Emitted when the code snippet is finalized by the code interpreter.
        #
        #   @param code [String] The final code snippet output by the code interpreter.
        #
        #   @param item_id [String] The unique identifier of the code interpreter tool call item.
        #
        #   @param output_index [Integer] The index of the output item in the response for which the code is finalized.
        #
        #   @param sequence_number [Integer] The sequence number of this event, used to order streaming events.
        #
        #   @param type [Symbol, :"response.code_interpreter_call_code.done"] The type of the event. Always `response.code_interpreter_call_code.done`.
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Responses
      class ResponseCodeInterpreterCallCompletedEvent < OpenAI::Internal::Type::BaseModel
        # @!attribute item_id
        #   The unique identifier of the code interpreter tool call item.
        #
        #   @return [String]
        required :item_id, String

        # @!attribute output_index
        #   The index of the output item in the response for which the code interpreter call
        #   is completed.
        #
        #   @return [Integer]
        required :output_index, Integer

        # @!attribute sequence_number
        #   The sequence number of this event, used to order streaming events.
        #
        #   @return [Integer]
        required :sequence_number, Integer

        # @!attribute type
        #   The type of the event. Always `response.code_interpreter_call.completed`.
        #
        #   @return [Symbol, :"response.code_interpreter_call.completed"]
        required :type, const: :"response.code_interpreter_call.completed"

        # @!method initialize(item_id:, output_index:, sequence_number:, type: :"response.code_interpreter_call.completed")
        #   Some parameter documentation has been truncated; see
        #   {OpenAI::Models::Responses::ResponseCodeInterpreterCallCompletedEvent} for more
        #   details.
        #
        #   Emitted when the code interpreter call is completed.
        #
        #   @param item_id [String] The unique identifier of the code interpreter tool call item.
        #
        #   @param output_index [Integer] The index of the output item in the response for which the code interpreter call
        #
        #   @param sequence_number [Integer] The sequence number of this event, used to order streaming events.
        #
        #   @param type [Symbol, :"response.code_interpreter_call.completed"] The type of the event. Always `response.code_interpreter_call.completed`.
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Responses
      class ResponseCodeInterpreterCallInProgressEvent < OpenAI::Internal::Type::BaseModel
        # @!attribute item_id
        #   The unique identifier of the code interpreter tool call item.
        #
        #   @return [String]
        required :item_id, String

        # @!attribute output_index
        #   The index of the output item in the response for which the code interpreter call
        #   is in progress.
        #
        #   @return [Integer]
        required :output_index, Integer

        # @!attribute sequence_number
        #   The sequence number of this event, used to order streaming events.
        #
        #   @return [Integer]
        required :sequence_number, Integer

        # @!attribute type
        #   The type of the event. Always `response.code_interpreter_call.in_progress`.
        #
        #   @return [Symbol, :"response.code_interpreter_call.in_progress"]
        required :type, const: :"response.code_interpreter_call.in_progress"

        # @!method initialize(item_id:, output_index:, sequence_number:, type: :"response.code_interpreter_call.in_progress")
        #   Some parameter documentation has been truncated; see
        #   {OpenAI::Models::Responses::ResponseCodeInterpreterCallInProgressEvent} for more
        #   details.
        #
        #   Emitted when a code interpreter call is in progress.
        #
        #   @param item_id [String] The unique identifier of the code interpreter tool call item.
        #
        #   @param output_index [Integer] The index of the output item in the response for which the code interpreter call
        #
        #   @param sequence_number [Integer] The sequence number of this event, used to order streaming events.
        #
        #   @param type [Symbol, :"response.code_interpreter_call.in_progress"] The type of the event. Always `response.code_interpreter_call.in_progress`.
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Responses
      class ResponseCodeInterpreterCallInterpretingEvent < OpenAI::Internal::Type::BaseModel
        # @!attribute item_id
        #   The unique identifier of the code interpreter tool call item.
        #
        #   @return [String]
        required :item_id, String

        # @!attribute output_index
        #   The index of the output item in the response for which the code interpreter is
        #   interpreting code.
        #
        #   @return [Integer]
        required :output_index, Integer

        # @!attribute sequence_number
        #   The sequence number of this event, used to order streaming events.
        #
        #   @return [Integer]
        required :sequence_number, Integer

        # @!attribute type
        #   The type of the event. Always `response.code_interpreter_call.interpreting`.
        #
        #   @return [Symbol, :"response.code_interpreter_call.interpreting"]
        required :type, const: :"response.code_interpreter_call.interpreting"

        # @!method initialize(item_id:, output_index:, sequence_number:, type: :"response.code_interpreter_call.interpreting")
        #   Some parameter documentation has been truncated; see
        #   {OpenAI::Models::Responses::ResponseCodeInterpreterCallInterpretingEvent} for
        #   more details.
        #
        #   Emitted when the code interpreter is actively interpreting the code snippet.
        #
        #   @param item_id [String] The unique identifier of the code interpreter tool call item.
        #
        #   @param output_index [Integer] The index of the output item in the response for which the code interpreter is i
        #
        #   @param sequence_number [Integer] The sequence number of this event, used to order streaming events.
        #
        #   @param type [Symbol, :"response.code_interpreter_call.interpreting"] The type of the event. Always `response.code_interpreter_call.interpreting`.
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Responses
      class ResponseCodeInterpreterToolCall < OpenAI::Internal::Type::BaseModel
        # @!attribute id
        #   The unique ID of the code interpreter tool call.
        #
        #   @return [String]
        required :id, String

        # @!attribute code
        #   The code to run, or null if not available.
        #
        #   @return [String, nil]
        required :code, String, nil?: true

        # @!attribute container_id
        #   The ID of the container used to run the code.
        #
        #   @return [String]
        required :container_id, String

        # @!attribute outputs
        #   The outputs generated by the code interpreter, such as logs or images. Can be
        #   null if no outputs are available.
        #
        #   @return [Array<OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Output::Logs, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Output::Image>, nil]
        required :outputs,
                 -> {
                   OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseCodeInterpreterToolCall::Output]
                 },
                 nil?: true

        # @!attribute status
        #   The status of the code interpreter tool call. Valid values are `in_progress`,
        #   `completed`, `incomplete`, `interpreting`, and `failed`.
        #
        #   @return [Symbol, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Status]
        required :status, enum: -> { OpenAI::Responses::ResponseCodeInterpreterToolCall::Status }

        # @!attribute type
        #   The type of the code interpreter tool call. Always `code_interpreter_call`.
        #
        #   @return [Symbol, :code_interpreter_call]
        required :type, const: :code_interpreter_call

        # @!method initialize(id:, code:, container_id:, outputs:, status:, type: :code_interpreter_call)
        #   Some parameter documentation has been truncated; see
        #   {OpenAI::Models::Responses::ResponseCodeInterpreterToolCall} for more details.
        #
        #   A tool call to run code.
        #
        #   @param id [String] The unique ID of the code interpreter tool call.
        #
        #   @param code [String, nil] The code to run, or null if not available.
        #
        #   @param container_id [String] The ID of the container used to run the code.
        #
        #   @param outputs [Array<OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Output::Logs, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Output::Image>, nil] The outputs generated by the code interpreter, such as logs or images.
        #
        #   @param status [Symbol, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Status] The status of the code interpreter tool call. Valid values are `in_progress`, `c
        #
        #   @param type [Symbol, :code_interpreter_call] The type of the code interpreter tool call. Always `code_interpreter_call`.

        # A single output produced by the code interpreter: either logs or an image,
        # discriminated by its `type` field.
        module Output
          extend OpenAI::Internal::Type::Union

          discriminator :type

          # The logs output from the code interpreter.
          variant :logs, -> { OpenAI::Responses::ResponseCodeInterpreterToolCall::Output::Logs }

          # The image output from the code interpreter.
          variant :image, -> { OpenAI::Responses::ResponseCodeInterpreterToolCall::Output::Image }

          class Logs < OpenAI::Internal::Type::BaseModel
            # @!attribute logs
            #   The logs output from the code interpreter.
            #
            #   @return [String]
            required :logs, String

            # @!attribute type
            #   The type of the output. Always 'logs'.
            #
            #   @return [Symbol, :logs]
            required :type, const: :logs

            # @!method initialize(logs:, type: :logs)
            #   The logs output from the code interpreter.
            #
            #   @param logs [String] The logs output from the code interpreter.
            #
            #   @param type [Symbol, :logs] The type of the output. Always 'logs'.
          end

          class Image < OpenAI::Internal::Type::BaseModel
            # @!attribute type
            #   The type of the output. Always 'image'.
            #
            #   @return [Symbol, :image]
            required :type, const: :image

            # @!attribute url
            #   The URL of the image output from the code interpreter.
            #
            #   @return [String]
            required :url, String

            # @!method initialize(url:, type: :image)
            #   The image output from the code interpreter.
            #
            #   @param url [String] The URL of the image output from the code interpreter.
            #
            #   @param type [Symbol, :image] The type of the output. Always 'image'.
          end

          # @!method self.variants
          #   @return [Array(OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Output::Logs, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall::Output::Image)]
        end

        # The status of the code interpreter tool call. Valid values are `in_progress`,
        # `completed`, `incomplete`, `interpreting`, and `failed`.
        #
        # @see OpenAI::Models::Responses::ResponseCodeInterpreterToolCall#status
        module Status
          extend OpenAI::Internal::Type::Enum

          IN_PROGRESS = :in_progress
          COMPLETED = :completed
          INCOMPLETE = :incomplete
          INTERPRETING = :interpreting
          FAILED = :failed

          # @!method self.values
          #   @return [Array<Symbol>]
        end
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Responses
      class ResponseCompletedEvent < OpenAI::Internal::Type::BaseModel
        # @!attribute response
        #   Properties of the completed response.
        #
        #   @return [OpenAI::Models::Responses::Response]
        required :response, -> { OpenAI::Responses::Response }

        # @!attribute sequence_number
        #   The sequence number for this event.
        #
        #   @return [Integer]
        required :sequence_number, Integer

        # @!attribute type
        #   The type of the event. Always `response.completed`.
        #
        #   @return [Symbol, :"response.completed"]
        required :type, const: :"response.completed"

        # @!method initialize(response:, sequence_number:, type: :"response.completed")
        #   Some parameter documentation has been truncated; see
        #   {OpenAI::Models::Responses::ResponseCompletedEvent} for more details.
        #
        #   Emitted when the model response is complete.
        #
        #   @param response [OpenAI::Models::Responses::Response] Properties of the completed response.
        #
        #   @param sequence_number [Integer] The sequence number for this event.
        #
        #   @param type [Symbol, :"response.completed"] The type of the event. Always `response.completed`.
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
1
class ResponseComputerToolCall < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the computer call.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute action
-
# A click action.
-
#
-
# @return [OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click, OpenAI::Models::Responses::ResponseComputerToolCall::Action::DoubleClick, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Keypress, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Move, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Screenshot, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Scroll, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Type, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Wait]
-
1
required :action, union: -> { OpenAI::Responses::ResponseComputerToolCall::Action }
-
-
# @!attribute call_id
-
# An identifier used when responding to the tool call with output.
-
#
-
# @return [String]
-
1
required :call_id, String
-
-
# @!attribute pending_safety_checks
-
# The pending safety checks for the computer call.
-
#
-
# @return [Array<OpenAI::Models::Responses::ResponseComputerToolCall::PendingSafetyCheck>]
-
1
required :pending_safety_checks,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseComputerToolCall::PendingSafetyCheck]
-
}
-
-
# @!attribute status
-
# The status of the item. One of `in_progress`, `completed`, or `incomplete`.
-
# Populated when items are returned via API.
-
#
-
# @return [Symbol, OpenAI::Models::Responses::ResponseComputerToolCall::Status]
-
1
required :status, enum: -> { OpenAI::Responses::ResponseComputerToolCall::Status }
-
-
# @!attribute type
-
# The type of the computer call. Always `computer_call`.
-
#
-
# @return [Symbol, OpenAI::Models::Responses::ResponseComputerToolCall::Type]
-
1
required :type, enum: -> { OpenAI::Responses::ResponseComputerToolCall::Type }
-
-
# @!method initialize(id:, action:, call_id:, pending_safety_checks:, status:, type:)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseComputerToolCall} for more details.
-
#
-
# A tool call to a computer use tool. See the
-
# [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use)
-
# for more information.
-
#
-
# @param id [String] The unique ID of the computer call.
-
#
-
# @param action [OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click, OpenAI::Models::Responses::ResponseComputerToolCall::Action::DoubleClick, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Keypress, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Move, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Screenshot, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Scroll, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Type, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Wait] A click action.
-
#
-
# @param call_id [String] An identifier used when responding to the tool call with output.
-
#
-
# @param pending_safety_checks [Array<OpenAI::Models::Responses::ResponseComputerToolCall::PendingSafetyCheck>] The pending safety checks for the computer call.
-
#
-
# @param status [Symbol, OpenAI::Models::Responses::ResponseComputerToolCall::Status] The status of the item. One of `in_progress`, `completed`, or
-
#
-
# @param type [Symbol, OpenAI::Models::Responses::ResponseComputerToolCall::Type] The type of the computer call. Always `computer_call`.
-
-
# A click action.
-
#
-
# @see OpenAI::Models::Responses::ResponseComputerToolCall#action
-
1
module Action
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
# A click action.
-
1
variant :click, -> { OpenAI::Responses::ResponseComputerToolCall::Action::Click }
-
-
# A double click action.
-
1
variant :double_click, -> { OpenAI::Responses::ResponseComputerToolCall::Action::DoubleClick }
-
-
# A drag action.
-
1
variant :drag, -> { OpenAI::Responses::ResponseComputerToolCall::Action::Drag }
-
-
# A collection of keypresses the model would like to perform.
-
1
variant :keypress, -> { OpenAI::Responses::ResponseComputerToolCall::Action::Keypress }
-
-
# A mouse move action.
-
1
variant :move, -> { OpenAI::Responses::ResponseComputerToolCall::Action::Move }
-
-
# A screenshot action.
-
1
variant :screenshot, -> { OpenAI::Responses::ResponseComputerToolCall::Action::Screenshot }
-
-
# A scroll action.
-
1
variant :scroll, -> { OpenAI::Responses::ResponseComputerToolCall::Action::Scroll }
-
-
# An action to type in text.
-
1
variant :type, -> { OpenAI::Responses::ResponseComputerToolCall::Action::Type }
-
-
# A wait action.
-
1
variant :wait, -> { OpenAI::Responses::ResponseComputerToolCall::Action::Wait }
-
-
1
class Click < OpenAI::Internal::Type::BaseModel
-
# @!attribute button
-
# Indicates which mouse button was pressed during the click. One of `left`,
-
# `right`, `wheel`, `back`, or `forward`.
-
#
-
# @return [Symbol, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click::Button]
-
1
required :button, enum: -> { OpenAI::Responses::ResponseComputerToolCall::Action::Click::Button }
-
-
# @!attribute type
-
# Specifies the event type. For a click action, this property is always set to
-
# `click`.
-
#
-
# @return [Symbol, :click]
-
1
required :type, const: :click
-
-
# @!attribute x
-
# The x-coordinate where the click occurred.
-
#
-
# @return [Integer]
-
1
required :x, Integer
-
-
# @!attribute y_
-
# The y-coordinate where the click occurred.
-
#
-
# @return [Integer]
-
1
required :y_, Integer, api_name: :y
-
-
# @!method initialize(button:, x:, y_:, type: :click)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click} for more
-
# details.
-
#
-
# A click action.
-
#
-
# @param button [Symbol, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click::Button] Indicates which mouse button was pressed during the click. One of `left`, `right
-
#
-
# @param x [Integer] The x-coordinate where the click occurred.
-
#
-
# @param y_ [Integer] The y-coordinate where the click occurred.
-
#
-
# @param type [Symbol, :click] Specifies the event type. For a click action, this property is
-
-
# Indicates which mouse button was pressed during the click. One of `left`,
-
# `right`, `wheel`, `back`, or `forward`.
-
#
-
# @see OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click#button
-
1
module Button
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
LEFT = :left
-
1
RIGHT = :right
-
1
WHEEL = :wheel
-
1
BACK = :back
-
1
FORWARD = :forward
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
-
1
class DoubleClick < OpenAI::Internal::Type::BaseModel
-
# @!attribute type
-
# Specifies the event type. For a double click action, this property is always set
-
# to `double_click`.
-
#
-
# @return [Symbol, :double_click]
-
1
required :type, const: :double_click
-
-
# @!attribute x
-
# The x-coordinate where the double click occurred.
-
#
-
# @return [Integer]
-
1
required :x, Integer
-
-
# @!attribute y_
-
# The y-coordinate where the double click occurred.
-
#
-
# @return [Integer]
-
1
required :y_, Integer, api_name: :y
-
-
# @!method initialize(x:, y_:, type: :double_click)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseComputerToolCall::Action::DoubleClick} for
-
# more details.
-
#
-
# A double click action.
-
#
-
# @param x [Integer] The x-coordinate where the double click occurred.
-
#
-
# @param y_ [Integer] The y-coordinate where the double click occurred.
-
#
-
# @param type [Symbol, :double_click] Specifies the event type. For a double click action, this property is
-
end
-
-
1
class Drag < OpenAI::Internal::Type::BaseModel
-
# @!attribute path
-
# An array of coordinates representing the path of the drag action. Coordinates
-
# will appear as an array of objects, eg
-
#
-
# ```
-
# [
-
# { x: 100, y: 200 },
-
# { x: 200, y: 300 }
-
# ]
-
# ```
-
#
-
# @return [Array<OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag::Path>]
-
1
required :path,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseComputerToolCall::Action::Drag::Path]
-
}
-
-
# @!attribute type
-
# Specifies the event type. For a drag action, this property is always set to
-
# `drag`.
-
#
-
# @return [Symbol, :drag]
-
1
required :type, const: :drag
-
-
# @!method initialize(path:, type: :drag)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag} for more
-
# details.
-
#
-
# A drag action.
-
#
-
# @param path [Array<OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag::Path>] An array of coordinates representing the path of the drag action. Coordinates wi
-
#
-
# @param type [Symbol, :drag] Specifies the event type. For a drag action, this property is
-
-
1
class Path < OpenAI::Internal::Type::BaseModel
-
# @!attribute x
-
# The x-coordinate.
-
#
-
# @return [Integer]
-
1
required :x, Integer
-
-
# @!attribute y_
-
# The y-coordinate.
-
#
-
# @return [Integer]
-
1
required :y_, Integer, api_name: :y
-
-
# @!method initialize(x:, y_:)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag::Path} for
-
# more details.
-
#
-
# A series of x/y coordinate pairs in the drag path.
-
#
-
# @param x [Integer] The x-coordinate.
-
#
-
# @param y_ [Integer] The y-coordinate.
-
end
-
end
-
-
1
class Keypress < OpenAI::Internal::Type::BaseModel
-
# @!attribute keys
-
# The combination of keys the model is requesting to be pressed. This is an array
-
# of strings, each representing a key.
-
#
-
# @return [Array<String>]
-
1
required :keys, OpenAI::Internal::Type::ArrayOf[String]
-
-
# @!attribute type
-
# Specifies the event type. For a keypress action, this property is always set to
-
# `keypress`.
-
#
-
# @return [Symbol, :keypress]
-
1
required :type, const: :keypress
-
-
# @!method initialize(keys:, type: :keypress)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseComputerToolCall::Action::Keypress} for more
-
# details.
-
#
-
# A collection of keypresses the model would like to perform.
-
#
-
# @param keys [Array<String>] The combination of keys the model is requesting to be pressed. This is an
-
#
-
# @param type [Symbol, :keypress] Specifies the event type. For a keypress action, this property is
-
end
-
-
1
class Move < OpenAI::Internal::Type::BaseModel
-
# @!attribute type
-
# Specifies the event type. For a move action, this property is always set to
-
# `move`.
-
#
-
# @return [Symbol, :move]
-
1
required :type, const: :move
-
-
# @!attribute x
-
# The x-coordinate to move to.
-
#
-
# @return [Integer]
-
1
required :x, Integer
-
-
# @!attribute y_
-
# The y-coordinate to move to.
-
#
-
# @return [Integer]
-
1
required :y_, Integer, api_name: :y
-
-
# @!method initialize(x:, y_:, type: :move)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseComputerToolCall::Action::Move} for more
-
# details.
-
#
-
# A mouse move action.
-
#
-
# @param x [Integer] The x-coordinate to move to.
-
#
-
# @param y_ [Integer] The y-coordinate to move to.
-
#
-
# @param type [Symbol, :move] Specifies the event type. For a move action, this property is
-
end
-
-
1
class Screenshot < OpenAI::Internal::Type::BaseModel
-
# @!attribute type
-
# Specifies the event type. For a screenshot action, this property is always set
-
# to `screenshot`.
-
#
-
# @return [Symbol, :screenshot]
-
1
required :type, const: :screenshot
-
-
# @!method initialize(type: :screenshot)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseComputerToolCall::Action::Screenshot} for
-
# more details.
-
#
-
# A screenshot action.
-
#
-
# @param type [Symbol, :screenshot] Specifies the event type. For a screenshot action, this property is
-
end
-
-
1
class Scroll < OpenAI::Internal::Type::BaseModel
-
# @!attribute scroll_x
-
# The horizontal scroll distance.
-
#
-
# @return [Integer]
-
1
required :scroll_x, Integer
-
-
# @!attribute scroll_y
-
# The vertical scroll distance.
-
#
-
# @return [Integer]
-
1
required :scroll_y, Integer
-
-
# @!attribute type
-
# Specifies the event type. For a scroll action, this property is always set to
-
# `scroll`.
-
#
-
# @return [Symbol, :scroll]
-
1
required :type, const: :scroll
-
-
# @!attribute x
-
# The x-coordinate where the scroll occurred.
-
#
-
# @return [Integer]
-
1
required :x, Integer
-
-
# @!attribute y_
-
# The y-coordinate where the scroll occurred.
-
#
-
# @return [Integer]
-
1
required :y_, Integer, api_name: :y
-
-
# @!method initialize(scroll_x:, scroll_y:, x:, y_:, type: :scroll)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseComputerToolCall::Action::Scroll} for more
-
# details.
-
#
-
# A scroll action.
-
#
-
# @param scroll_x [Integer] The horizontal scroll distance.
-
#
-
# @param scroll_y [Integer] The vertical scroll distance.
-
#
-
# @param x [Integer] The x-coordinate where the scroll occurred.
-
#
-
# @param y_ [Integer] The y-coordinate where the scroll occurred.
-
#
-
# @param type [Symbol, :scroll] Specifies the event type. For a scroll action, this property is
-
end
-
-
1
class Type < OpenAI::Internal::Type::BaseModel
-
# @!attribute text
-
# The text to type.
-
#
-
# @return [String]
-
1
required :text, String
-
-
# @!attribute type
-
# Specifies the event type. For a type action, this property is always set to
-
# `type`.
-
#
-
# @return [Symbol, :type]
-
1
required :type, const: :type
-
-
# @!method initialize(text:, type: :type)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseComputerToolCall::Action::Type} for more
-
# details.
-
#
-
# An action to type in text.
-
#
-
# @param text [String] The text to type.
-
#
-
# @param type [Symbol, :type] Specifies the event type. For a type action, this property is
-
end
-
-
1
class Wait < OpenAI::Internal::Type::BaseModel
-
# @!attribute type
-
# Specifies the event type. For a wait action, this property is always set to
-
# `wait`.
-
#
-
# @return [Symbol, :wait]
-
1
required :type, const: :wait
-
-
# @!method initialize(type: :wait)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseComputerToolCall::Action::Wait} for more
-
# details.
-
#
-
# A wait action.
-
#
-
# @param type [Symbol, :wait] Specifies the event type. For a wait action, this property is
-
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Responses::ResponseComputerToolCall::Action::Click, OpenAI::Models::Responses::ResponseComputerToolCall::Action::DoubleClick, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Drag, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Keypress, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Move, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Screenshot, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Scroll, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Type, OpenAI::Models::Responses::ResponseComputerToolCall::Action::Wait)]
-
end
-
-
1
class PendingSafetyCheck < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The ID of the pending safety check.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute code
-
# The type of the pending safety check.
-
#
-
# @return [String]
-
1
required :code, String
-
-
# @!attribute message
-
# Details about the pending safety check.
-
#
-
# @return [String]
-
1
required :message, String
-
-
# @!method initialize(id:, code:, message:)
-
# A pending safety check for the computer call.
-
#
-
# @param id [String] The ID of the pending safety check.
-
#
-
# @param code [String] The type of the pending safety check.
-
#
-
# @param message [String] Details about the pending safety check.
-
end
-
-
# The status of the item. One of `in_progress`, `completed`, or `incomplete`.
-
# Populated when items are returned via API.
-
#
-
# @see OpenAI::Models::Responses::ResponseComputerToolCall#status
-
1
module Status
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
IN_PROGRESS = :in_progress
-
1
COMPLETED = :completed
-
1
INCOMPLETE = :incomplete
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
# The type of the computer call. Always `computer_call`.
-
#
-
# @see OpenAI::Models::Responses::ResponseComputerToolCall#type
-
1
module Type
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
COMPUTER_CALL = :computer_call
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
1
class ResponseComputerToolCallOutputItem < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the computer call tool output.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute call_id
-
# The ID of the computer tool call that produced the output.
-
#
-
# @return [String]
-
1
required :call_id, String
-
-
# @!attribute output
-
# A computer screenshot image used with the computer use tool.
-
#
-
# @return [OpenAI::Models::Responses::ResponseComputerToolCallOutputScreenshot]
-
1
required :output, -> { OpenAI::Responses::ResponseComputerToolCallOutputScreenshot }
-
-
# @!attribute type
-
# The type of the computer tool call output. Always `computer_call_output`.
-
#
-
# @return [Symbol, :computer_call_output]
-
1
required :type, const: :computer_call_output
-
-
# @!attribute acknowledged_safety_checks
-
# The safety checks reported by the API that have been acknowledged by the
-
# developer.
-
#
-
# @return [Array<OpenAI::Models::Responses::ResponseComputerToolCallOutputItem::AcknowledgedSafetyCheck>, nil]
-
1
optional :acknowledged_safety_checks,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseComputerToolCallOutputItem::AcknowledgedSafetyCheck]
-
}
-
-
# @!attribute status
-
# The status of the message input. One of `in_progress`, `completed`, or
-
# `incomplete`. Populated when input items are returned via API.
-
#
-
# @return [Symbol, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem::Status, nil]
-
1
optional :status, enum: -> { OpenAI::Responses::ResponseComputerToolCallOutputItem::Status }
-
-
# @!method initialize(id:, call_id:, output:, acknowledged_safety_checks: nil, status: nil, type: :computer_call_output)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseComputerToolCallOutputItem} for more
-
# details.
-
#
-
# @param id [String] The unique ID of the computer call tool output.
-
#
-
# @param call_id [String] The ID of the computer tool call that produced the output.
-
#
-
# @param output [OpenAI::Models::Responses::ResponseComputerToolCallOutputScreenshot] A computer screenshot image used with the computer use tool.
-
#
-
# @param acknowledged_safety_checks [Array<OpenAI::Models::Responses::ResponseComputerToolCallOutputItem::AcknowledgedSafetyCheck>] The safety checks reported by the API that have been acknowledged by the
-
#
-
# @param status [Symbol, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem::Status] The status of the message input. One of `in_progress`, `completed`, or
-
#
-
# @param type [Symbol, :computer_call_output] The type of the computer tool call output. Always `computer_call_output`.
-
-
1
class AcknowledgedSafetyCheck < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The ID of the pending safety check.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute code
-
# The type of the pending safety check.
-
#
-
# @return [String]
-
1
required :code, String
-
-
# @!attribute message
-
# Details about the pending safety check.
-
#
-
# @return [String]
-
1
required :message, String
-
-
# @!method initialize(id:, code:, message:)
-
# A pending safety check for the computer call.
-
#
-
# @param id [String] The ID of the pending safety check.
-
#
-
# @param code [String] The type of the pending safety check.
-
#
-
# @param message [String] Details about the pending safety check.
-
end
-
-
# The status of the message input. One of `in_progress`, `completed`, or
-
# `incomplete`. Populated when input items are returned via API.
-
#
-
# @see OpenAI::Models::Responses::ResponseComputerToolCallOutputItem#status
-
1
module Status
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
IN_PROGRESS = :in_progress
-
1
COMPLETED = :completed
-
1
INCOMPLETE = :incomplete
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
1
class ResponseComputerToolCallOutputScreenshot < OpenAI::Internal::Type::BaseModel
-
# @!attribute type
-
# Specifies the event type. For a computer screenshot, this property is always set
-
# to `computer_screenshot`.
-
#
-
# @return [Symbol, :computer_screenshot]
-
1
required :type, const: :computer_screenshot
-
-
# @!attribute file_id
-
# The identifier of an uploaded file that contains the screenshot.
-
#
-
# @return [String, nil]
-
1
optional :file_id, String
-
-
# @!attribute image_url
-
# The URL of the screenshot image.
-
#
-
# @return [String, nil]
-
1
optional :image_url, String
-
-
# @!method initialize(file_id: nil, image_url: nil, type: :computer_screenshot)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseComputerToolCallOutputScreenshot} for more
-
# details.
-
#
-
# A computer screenshot image used with the computer use tool.
-
#
-
# @param file_id [String] The identifier of an uploaded file that contains the screenshot.
-
#
-
# @param image_url [String] The URL of the screenshot image.
-
#
-
# @param type [Symbol, :computer_screenshot] Specifies the event type. For a computer screenshot, this property is
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
# Multi-modal input and output contents.
-
1
module ResponseContent
-
1
extend OpenAI::Internal::Type::Union
-
-
# A text input to the model.
-
1
variant -> { OpenAI::Responses::ResponseInputText }
-
-
# An image input to the model. Learn about [image inputs](https://platform.openai.com/docs/guides/vision).
-
1
variant -> { OpenAI::Responses::ResponseInputImage }
-
-
# A file input to the model.
-
1
variant -> { OpenAI::Responses::ResponseInputFile }
-
-
# A text output from the model.
-
1
variant -> { OpenAI::Responses::ResponseOutputText }
-
-
# A refusal from the model.
-
1
variant -> { OpenAI::Responses::ResponseOutputRefusal }
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile, OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal)]
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
1
class ResponseContentPartAddedEvent < OpenAI::Internal::Type::BaseModel
-
# @!attribute content_index
-
# The index of the content part that was added.
-
#
-
# @return [Integer]
-
1
required :content_index, Integer
-
-
# @!attribute item_id
-
# The ID of the output item that the content part was added to.
-
#
-
# @return [String]
-
1
required :item_id, String
-
-
# @!attribute output_index
-
# The index of the output item that the content part was added to.
-
#
-
# @return [Integer]
-
1
required :output_index, Integer
-
-
# @!attribute part
-
# The content part that was added.
-
#
-
# @return [OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal]
-
1
required :part, union: -> { OpenAI::Responses::ResponseContentPartAddedEvent::Part }
-
-
# @!attribute sequence_number
-
# The sequence number of this event.
-
#
-
# @return [Integer]
-
1
required :sequence_number, Integer
-
-
# @!attribute type
-
# The type of the event. Always `response.content_part.added`.
-
#
-
# @return [Symbol, :"response.content_part.added"]
-
1
required :type, const: :"response.content_part.added"
-
-
# @!method initialize(content_index:, item_id:, output_index:, part:, sequence_number:, type: :"response.content_part.added")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseContentPartAddedEvent} for more details.
-
#
-
# Emitted when a new content part is added.
-
#
-
# @param content_index [Integer] The index of the content part that was added.
-
#
-
# @param item_id [String] The ID of the output item that the content part was added to.
-
#
-
# @param output_index [Integer] The index of the output item that the content part was added to.
-
#
-
# @param part [OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal] The content part that was added.
-
#
-
# @param sequence_number [Integer] The sequence number of this event.
-
#
-
# @param type [Symbol, :"response.content_part.added"] The type of the event. Always `response.content_part.added`.
-
-
# The content part that was added.
-
#
-
# @see OpenAI::Models::Responses::ResponseContentPartAddedEvent#part
-
1
module Part
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
# A text output from the model.
-
1
variant :output_text, -> { OpenAI::Responses::ResponseOutputText }
-
-
# A refusal from the model.
-
1
variant :refusal, -> { OpenAI::Responses::ResponseOutputRefusal }
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal)]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
1
class ResponseContentPartDoneEvent < OpenAI::Internal::Type::BaseModel
-
# @!attribute content_index
-
# The index of the content part that is done.
-
#
-
# @return [Integer]
-
1
required :content_index, Integer
-
-
# @!attribute item_id
-
# The ID of the output item that the content part was added to.
-
#
-
# @return [String]
-
1
required :item_id, String
-
-
# @!attribute output_index
-
# The index of the output item that the content part was added to.
-
#
-
# @return [Integer]
-
1
required :output_index, Integer
-
-
# @!attribute part
-
# The content part that is done.
-
#
-
# @return [OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal]
-
1
required :part, union: -> { OpenAI::Responses::ResponseContentPartDoneEvent::Part }
-
-
# @!attribute sequence_number
-
# The sequence number of this event.
-
#
-
# @return [Integer]
-
1
required :sequence_number, Integer
-
-
# @!attribute type
-
# The type of the event. Always `response.content_part.done`.
-
#
-
# @return [Symbol, :"response.content_part.done"]
-
1
required :type, const: :"response.content_part.done"
-
-
# @!method initialize(content_index:, item_id:, output_index:, part:, sequence_number:, type: :"response.content_part.done")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseContentPartDoneEvent} for more details.
-
#
-
# Emitted when a content part is done.
-
#
-
# @param content_index [Integer] The index of the content part that is done.
-
#
-
# @param item_id [String] The ID of the output item that the content part was added to.
-
#
-
# @param output_index [Integer] The index of the output item that the content part was added to.
-
#
-
# @param part [OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal] The content part that is done.
-
#
-
# @param sequence_number [Integer] The sequence number of this event.
-
#
-
# @param type [Symbol, :"response.content_part.done"] The type of the event. Always `response.content_part.done`.
-
-
# The content part that is done.
-
#
-
# @see OpenAI::Models::Responses::ResponseContentPartDoneEvent#part
-
1
module Part
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
# A text output from the model.
-
1
variant :output_text, -> { OpenAI::Responses::ResponseOutputText }
-
-
# A refusal from the model.
-
1
variant :refusal, -> { OpenAI::Responses::ResponseOutputRefusal }
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal)]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Responses
      # Request parameters accepted when creating (or streaming) a model
      # response via the Responses API.
      #
      # NOTE(review): generated SDK code — the `optional`/`required` attribute
      # declarations are metaprogramming hooks on `BaseModel`; edit via the
      # generator, not by hand.
      #
      # @see OpenAI::Resources::Responses#create
      #
      # @see OpenAI::Resources::Responses#stream_raw
      class ResponseCreateParams < OpenAI::Internal::Type::BaseModel
        # Provides `request_options` handling and parameter conversion.
        extend OpenAI::Internal::Type::RequestParameters::Converter
        include OpenAI::Internal::Type::RequestParameters

        # @!attribute background
        #   Whether to run the model response in the background.
        #   [Learn more](https://platform.openai.com/docs/guides/background).
        #
        #   @return [Boolean, nil]
        optional :background, OpenAI::Internal::Type::Boolean, nil?: true

        # @!attribute include
        #   Specify additional output data to include in the model response. Currently
        #   supported values are:
        #
        #   - `code_interpreter_call.outputs`: Includes the outputs of python code execution
        #     in code interpreter tool call items.
        #   - `computer_call_output.output.image_url`: Include image urls from the computer
        #     call output.
        #   - `file_search_call.results`: Include the search results of the file search tool
        #     call.
        #   - `message.input_image.image_url`: Include image urls from the input message.
        #   - `message.output_text.logprobs`: Include logprobs with assistant messages.
        #   - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
        #     tokens in reasoning item outputs. This enables reasoning items to be used in
        #     multi-turn conversations when using the Responses API statelessly (like when
        #     the `store` parameter is set to `false`, or when an organization is enrolled
        #     in the zero data retention program).
        #
        #   @return [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>, nil]
        optional :include,
                 -> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Responses::ResponseIncludable] },
                 nil?: true

        # @!attribute input
        #   Text, image, or file inputs to the model, used to generate a response.
        #
        #   Learn more:
        #
        #   - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
        #   - [Image inputs](https://platform.openai.com/docs/guides/images)
        #   - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
        #   - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
        #   - [Function calling](https://platform.openai.com/docs/guides/function-calling)
        #
        #   @return [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>, nil]
        optional :input, union: -> { OpenAI::Responses::ResponseCreateParams::Input }

        # @!attribute instructions
        #   A system (or developer) message inserted into the model's context.
        #
        #   When using along with `previous_response_id`, the instructions from a previous
        #   response will not be carried over to the next response. This makes it simple to
        #   swap out system (or developer) messages in new responses.
        #
        #   @return [String, nil]
        optional :instructions, String, nil?: true

        # @!attribute max_output_tokens
        #   An upper bound for the number of tokens that can be generated for a response,
        #   including visible output tokens and
        #   [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
        #
        #   @return [Integer, nil]
        optional :max_output_tokens, Integer, nil?: true

        # @!attribute max_tool_calls
        #   The maximum number of total calls to built-in tools that can be processed in a
        #   response. This maximum number applies across all built-in tool calls, not per
        #   individual tool. Any further attempts to call a tool by the model will be
        #   ignored.
        #
        #   @return [Integer, nil]
        optional :max_tool_calls, Integer, nil?: true

        # @!attribute metadata
        #   Set of 16 key-value pairs that can be attached to an object. This can be useful
        #   for storing additional information about the object in a structured format, and
        #   querying for objects via API or the dashboard.
        #
        #   Keys are strings with a maximum length of 64 characters. Values are strings with
        #   a maximum length of 512 characters.
        #
        #   @return [Hash{Symbol=>String}, nil]
        optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true

        # @!attribute model
        #   Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
        #   wide range of models with different capabilities, performance characteristics,
        #   and price points. Refer to the
        #   [model guide](https://platform.openai.com/docs/models) to browse and compare
        #   available models.
        #
        #   @return [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel::ResponsesOnlyModel, nil]
        optional :model, union: -> { OpenAI::ResponsesModel }

        # @!attribute parallel_tool_calls
        #   Whether to allow the model to run tool calls in parallel.
        #
        #   @return [Boolean, nil]
        optional :parallel_tool_calls, OpenAI::Internal::Type::Boolean, nil?: true

        # @!attribute previous_response_id
        #   The unique ID of the previous response to the model. Use this to create
        #   multi-turn conversations. Learn more about
        #   [conversation state](https://platform.openai.com/docs/guides/conversation-state).
        #
        #   @return [String, nil]
        optional :previous_response_id, String, nil?: true

        # @!attribute prompt
        #   Reference to a prompt template and its variables.
        #   [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
        #
        #   @return [OpenAI::Models::Responses::ResponsePrompt, nil]
        optional :prompt, -> { OpenAI::Responses::ResponsePrompt }, nil?: true

        # @!attribute prompt_cache_key
        #   Used by OpenAI to cache responses for similar requests to optimize your cache
        #   hit rates. Replaces the `user` field.
        #   [Learn more](https://platform.openai.com/docs/guides/prompt-caching).
        #
        #   @return [String, nil]
        optional :prompt_cache_key, String

        # @!attribute reasoning
        #   **o-series models only**
        #
        #   Configuration options for
        #   [reasoning models](https://platform.openai.com/docs/guides/reasoning).
        #
        #   @return [OpenAI::Models::Reasoning, nil]
        optional :reasoning, -> { OpenAI::Reasoning }, nil?: true

        # @!attribute safety_identifier
        #   A stable identifier used to help detect users of your application that may be
        #   violating OpenAI's usage policies. The IDs should be a string that uniquely
        #   identifies each user. We recommend hashing their username or email address, in
        #   order to avoid sending us any identifying information.
        #   [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
        #
        #   @return [String, nil]
        optional :safety_identifier, String

        # @!attribute service_tier
        #   Specifies the processing type used for serving the request.
        #
        #   - If set to 'auto', then the request will be processed with the service tier
        #     configured in the Project settings. Unless otherwise configured, the Project
        #     will use 'default'.
        #   - If set to 'default', then the request will be processed with the standard
        #     pricing and performance for the selected model.
        #   - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
        #     'priority', then the request will be processed with the corresponding service
        #     tier. [Contact sales](https://openai.com/contact-sales) to learn more about
        #     Priority processing.
        #   - When not set, the default behavior is 'auto'.
        #
        #   When the `service_tier` parameter is set, the response body will include the
        #   `service_tier` value based on the processing mode actually used to serve the
        #   request. This response value may be different from the value set in the
        #   parameter.
        #
        #   @return [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil]
        optional :service_tier, enum: -> { OpenAI::Responses::ResponseCreateParams::ServiceTier }, nil?: true

        # @!attribute store
        #   Whether to store the generated model response for later retrieval via API.
        #
        #   @return [Boolean, nil]
        optional :store, OpenAI::Internal::Type::Boolean, nil?: true

        # @!attribute temperature
        #   What sampling temperature to use, between 0 and 2. Higher values like 0.8 will
        #   make the output more random, while lower values like 0.2 will make it more
        #   focused and deterministic. We generally recommend altering this or `top_p` but
        #   not both.
        #
        #   @return [Float, nil]
        optional :temperature, Float, nil?: true

        # @!attribute text
        #   Configuration options for a text response from the model. Can be plain text or
        #   structured JSON data. Learn more:
        #
        #   - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
        #   - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
        #
        #   @return [OpenAI::Models::Responses::ResponseTextConfig, nil]
        optional :text,
                 union: -> {
                   OpenAI::UnionOf[
                     OpenAI::Responses::ResponseTextConfig,
                     OpenAI::StructuredOutput::JsonSchemaConverter
                   ]
                 }

        # @!attribute tool_choice
        #   How the model should select which tool (or tools) to use when generating a
        #   response. See the `tools` parameter to see how to specify which tools the model
        #   can call.
        #
        #   @return [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp, nil]
        optional :tool_choice, union: -> { OpenAI::Responses::ResponseCreateParams::ToolChoice }

        # @!attribute tools
        #   An array of tools the model may call while generating a response. You can
        #   specify which tool to use by setting the `tool_choice` parameter.
        #
        #   The two categories of tools you can provide the model are:
        #
        #   - **Built-in tools**: Tools that are provided by OpenAI that extend the model's
        #     capabilities, like
        #     [web search](https://platform.openai.com/docs/guides/tools-web-search) or
        #     [file search](https://platform.openai.com/docs/guides/tools-file-search).
        #     Learn more about
        #     [built-in tools](https://platform.openai.com/docs/guides/tools).
        #   - **Function calls (custom tools)**: Functions that are defined by you, enabling
        #     the model to call your own code. Learn more about
        #     [function calling](https://platform.openai.com/docs/guides/function-calling).
        #
        #   @return [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::StructuredOutput::JsonSchemaConverter, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>, nil]
        optional :tools, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::Tool] }

        # @!attribute top_logprobs
        #   An integer between 0 and 20 specifying the number of most likely tokens to
        #   return at each token position, each with an associated log probability.
        #
        #   @return [Integer, nil]
        optional :top_logprobs, Integer, nil?: true

        # @!attribute top_p
        #   An alternative to sampling with temperature, called nucleus sampling, where the
        #   model considers the results of the tokens with top_p probability mass. So 0.1
        #   means only the tokens comprising the top 10% probability mass are considered.
        #
        #   We generally recommend altering this or `temperature` but not both.
        #
        #   @return [Float, nil]
        optional :top_p, Float, nil?: true

        # @!attribute truncation
        #   The truncation strategy to use for the model response.
        #
        #   - `auto`: If the context of this response and previous ones exceeds the model's
        #     context window size, the model will truncate the response to fit the context
        #     window by dropping input items in the middle of the conversation.
        #   - `disabled` (default): If a model response will exceed the context window size
        #     for a model, the request will fail with a 400 error.
        #
        #   @return [Symbol, OpenAI::Models::Responses::ResponseCreateParams::Truncation, nil]
        optional :truncation, enum: -> { OpenAI::Responses::ResponseCreateParams::Truncation }, nil?: true

        # @!attribute user
        #   @deprecated
        #
        #   This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
        #   `prompt_cache_key` instead to maintain caching optimizations. A stable
        #   identifier for your end-users. Used to boost cache hit rates by better bucketing
        #   similar requests and to help OpenAI detect and prevent abuse.
        #   [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#safety-identifiers).
        #
        #   @return [String, nil]
        optional :user, String

        # @!method initialize(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
        #   Some parameter documentation has been truncated, see
        #   {OpenAI::Models::Responses::ResponseCreateParams} for more details.
        #
        #   @param background [Boolean, nil] Whether to run the model response in the background.
        #
        #   @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>, nil] Specify additional output data to include in the model response. Currently
        #
        #   @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
        #
        #   @param instructions [String, nil] A system (or developer) message inserted into the model's context.
        #
        #   @param max_output_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a response, in
        #
        #   @param max_tool_calls [Integer, nil] The maximum number of total calls to built-in tools that can be processed in a r
        #
        #   @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
        #
        #   @param model [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel::ResponsesOnlyModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
        #
        #   @param parallel_tool_calls [Boolean, nil] Whether to allow the model to run tool calls in parallel.
        #
        #   @param previous_response_id [String, nil] The unique ID of the previous response to the model. Use this to
        #
        #   @param prompt [OpenAI::Models::Responses::ResponsePrompt, nil] Reference to a prompt template and its variables.
        #
        #   @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi
        #
        #   @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
        #
        #   @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi
        #
        #   @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the processing type used for serving the request.
        #
        #   @param store [Boolean, nil] Whether to store the generated model response for later retrieval via
        #
        #   @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
        #
        #   @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. Can be plain
        #
        #   @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp] How the model should select which tool (or tools) to use when generating
        #
        #   @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::StructuredOutput::JsonSchemaConverter, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
        #
        #   @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to
        #
        #   @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling,
        #
        #   @param truncation [Symbol, OpenAI::Models::Responses::ResponseCreateParams::Truncation, nil] The truncation strategy to use for the model response.
        #
        #   @param user [String] This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
        #
        #   @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]

        # Text, image, or file inputs to the model, used to generate a response.
        #
        # Learn more:
        #
        # - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
        # - [Image inputs](https://platform.openai.com/docs/guides/images)
        # - [File inputs](https://platform.openai.com/docs/guides/pdf-files)
        # - [Conversation state](https://platform.openai.com/docs/guides/conversation-state)
        # - [Function calling](https://platform.openai.com/docs/guides/function-calling)
        module Input
          extend OpenAI::Internal::Type::Union

          # A text input to the model, equivalent to a text input with the
          # `user` role.
          variant String

          # A list of one or many input items to the model, containing
          # different content types.
          variant -> { OpenAI::Responses::ResponseInput }

          # @!method self.variants
          #   @return [Array(String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>)]
        end

        # Specifies the processing type used for serving the request.
        #
        # - If set to 'auto', then the request will be processed with the service tier
        #   configured in the Project settings. Unless otherwise configured, the Project
        #   will use 'default'.
        # - If set to 'default', then the request will be processed with the standard
        #   pricing and performance for the selected model.
        # - If set to '[flex](https://platform.openai.com/docs/guides/flex-processing)' or
        #   'priority', then the request will be processed with the corresponding service
        #   tier. [Contact sales](https://openai.com/contact-sales) to learn more about
        #   Priority processing.
        # - When not set, the default behavior is 'auto'.
        #
        # When the `service_tier` parameter is set, the response body will include the
        # `service_tier` value based on the processing mode actually used to serve the
        # request. This response value may be different from the value set in the
        # parameter.
        module ServiceTier
          extend OpenAI::Internal::Type::Enum

          AUTO = :auto
          DEFAULT = :default
          FLEX = :flex
          SCALE = :scale
          PRIORITY = :priority

          # @!method self.values
          #   @return [Array<Symbol>]
        end

        # How the model should select which tool (or tools) to use when generating a
        # response. See the `tools` parameter to see how to specify which tools the model
        # can call.
        module ToolChoice
          extend OpenAI::Internal::Type::Union

          # Controls which (if any) tool is called by the model.
          #
          # `none` means the model will not call any tool and instead generates a message.
          #
          # `auto` means the model can pick between generating a message or calling one or
          # more tools.
          #
          # `required` means the model must call one or more tools.
          variant enum: -> { OpenAI::Responses::ToolChoiceOptions }

          # Indicates that the model should use a built-in tool to generate a response.
          # [Learn more about built-in tools](https://platform.openai.com/docs/guides/tools).
          variant -> { OpenAI::Responses::ToolChoiceTypes }

          # Use this option to force the model to call a specific function.
          variant -> { OpenAI::Responses::ToolChoiceFunction }

          # Use this option to force the model to call a specific tool on a remote MCP server.
          variant -> { OpenAI::Responses::ToolChoiceMcp }

          # @!method self.variants
          #   @return [Array(Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp)]
        end

        # The truncation strategy to use for the model response.
        #
        # - `auto`: If the context of this response and previous ones exceeds the model's
        #   context window size, the model will truncate the response to fit the context
        #   window by dropping input items in the middle of the conversation.
        # - `disabled` (default): If a model response will exceed the context window size
        #   for a model, the request will fail with a 400 error.
        module Truncation
          extend OpenAI::Internal::Type::Enum

          AUTO = :auto
          DISABLED = :disabled

          # @!method self.values
          #   @return [Array<Symbol>]
        end
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Responses
      # Streaming event emitted when a response has been created.
      # NOTE(review): generated SDK code.
      class ResponseCreatedEvent < OpenAI::Internal::Type::BaseModel
        # @!attribute response
        #   The response that was created.
        #
        #   @return [OpenAI::Models::Responses::Response]
        required :response, -> { OpenAI::Responses::Response }

        # @!attribute sequence_number
        #   The sequence number for this event.
        #
        #   @return [Integer]
        required :sequence_number, Integer

        # @!attribute type
        #   The type of the event. Always `response.created`.
        #
        #   @return [Symbol, :"response.created"]
        required :type, const: :"response.created"

        # @!method initialize(response:, sequence_number:, type: :"response.created")
        #   Some parameter documentation has been truncated, see
        #   {OpenAI::Models::Responses::ResponseCreatedEvent} for more details.
        #
        #   An event that is emitted when a response is created.
        #
        #   @param response [OpenAI::Models::Responses::Response] The response that was created.
        #
        #   @param sequence_number [Integer] The sequence number for this event.
        #
        #   @param type [Symbol, :"response.created"] The type of the event. Always `response.created`.
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Responses
      # Request parameters for deleting a response. Carries no body fields;
      # only `request_options` from the mixed-in RequestParameters.
      # NOTE(review): generated SDK code.
      #
      # @see OpenAI::Resources::Responses#delete
      class ResponseDeleteParams < OpenAI::Internal::Type::BaseModel
        extend OpenAI::Internal::Type::RequestParameters::Converter
        include OpenAI::Internal::Type::RequestParameters

        # @!method initialize(request_options: {})
        #   @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Responses
      # An error object returned when the model fails to generate a Response.
      # NOTE(review): generated SDK code.
      class ResponseError < OpenAI::Internal::Type::BaseModel
        # @!attribute code
        #   The error code for the response.
        #
        #   @return [Symbol, OpenAI::Models::Responses::ResponseError::Code]
        required :code, enum: -> { OpenAI::Responses::ResponseError::Code }

        # @!attribute message
        #   A human-readable description of the error.
        #
        #   @return [String]
        required :message, String

        # @!method initialize(code:, message:)
        #   Some parameter documentation has been truncated, see
        #   {OpenAI::Models::Responses::ResponseError} for more details.
        #
        #   An error object returned when the model fails to generate a Response.
        #
        #   @param code [Symbol, OpenAI::Models::Responses::ResponseError::Code] The error code for the response.
        #
        #   @param message [String] A human-readable description of the error.

        # The error code for the response.
        #
        # @see OpenAI::Models::Responses::ResponseError#code
        module Code
          extend OpenAI::Internal::Type::Enum

          SERVER_ERROR = :server_error
          RATE_LIMIT_EXCEEDED = :rate_limit_exceeded
          INVALID_PROMPT = :invalid_prompt
          VECTOR_STORE_TIMEOUT = :vector_store_timeout
          INVALID_IMAGE = :invalid_image
          INVALID_IMAGE_FORMAT = :invalid_image_format
          INVALID_BASE64_IMAGE = :invalid_base64_image
          INVALID_IMAGE_URL = :invalid_image_url
          IMAGE_TOO_LARGE = :image_too_large
          IMAGE_TOO_SMALL = :image_too_small
          IMAGE_PARSE_ERROR = :image_parse_error
          IMAGE_CONTENT_POLICY_VIOLATION = :image_content_policy_violation
          INVALID_IMAGE_MODE = :invalid_image_mode
          IMAGE_FILE_TOO_LARGE = :image_file_too_large
          UNSUPPORTED_IMAGE_MEDIA_TYPE = :unsupported_image_media_type
          EMPTY_IMAGE_FILE = :empty_image_file
          FAILED_TO_DOWNLOAD_IMAGE = :failed_to_download_image
          IMAGE_FILE_NOT_FOUND = :image_file_not_found

          # @!method self.values
          #   @return [Array<Symbol>]
        end
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Responses
      # Streaming event emitted when an error occurs during response
      # generation. NOTE(review): generated SDK code.
      class ResponseErrorEvent < OpenAI::Internal::Type::BaseModel
        # @!attribute code
        #   The error code.
        #
        #   @return [String, nil]
        required :code, String, nil?: true

        # @!attribute message
        #   The error message.
        #
        #   @return [String]
        required :message, String

        # @!attribute param
        #   The error parameter.
        #
        #   @return [String, nil]
        required :param, String, nil?: true

        # @!attribute sequence_number
        #   The sequence number of this event.
        #
        #   @return [Integer]
        required :sequence_number, Integer

        # @!attribute type
        #   The type of the event. Always `error`.
        #
        #   @return [Symbol, :error]
        required :type, const: :error

        # @!method initialize(code:, message:, param:, sequence_number:, type: :error)
        #   Some parameter documentation has been truncated, see
        #   {OpenAI::Models::Responses::ResponseErrorEvent} for more details.
        #
        #   Emitted when an error occurs.
        #
        #   @param code [String, nil] The error code.
        #
        #   @param message [String] The error message.
        #
        #   @param param [String, nil] The error parameter.
        #
        #   @param sequence_number [Integer] The sequence number of this event.
        #
        #   @param type [Symbol, :error] The type of the event. Always `error`.
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Responses
      # Streaming event emitted when a response fails.
      # NOTE(review): generated SDK code.
      class ResponseFailedEvent < OpenAI::Internal::Type::BaseModel
        # @!attribute response
        #   The response that failed.
        #
        #   @return [OpenAI::Models::Responses::Response]
        required :response, -> { OpenAI::Responses::Response }

        # @!attribute sequence_number
        #   The sequence number of this event.
        #
        #   @return [Integer]
        required :sequence_number, Integer

        # @!attribute type
        #   The type of the event. Always `response.failed`.
        #
        #   @return [Symbol, :"response.failed"]
        required :type, const: :"response.failed"

        # @!method initialize(response:, sequence_number:, type: :"response.failed")
        #   Some parameter documentation has been truncated, see
        #   {OpenAI::Models::Responses::ResponseFailedEvent} for more details.
        #
        #   An event that is emitted when a response fails.
        #
        #   @param response [OpenAI::Models::Responses::Response] The response that failed.
        #
        #   @param sequence_number [Integer] The sequence number of this event.
        #
        #   @param type [Symbol, :"response.failed"] The type of the event. Always `response.failed`.
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Responses
      # Streaming event emitted when a file search call is completed
      # (results found). NOTE(review): generated SDK code.
      class ResponseFileSearchCallCompletedEvent < OpenAI::Internal::Type::BaseModel
        # @!attribute item_id
        #   The ID of the output item that the file search call is initiated.
        #
        #   @return [String]
        required :item_id, String

        # @!attribute output_index
        #   The index of the output item that the file search call is initiated.
        #
        #   @return [Integer]
        required :output_index, Integer

        # @!attribute sequence_number
        #   The sequence number of this event.
        #
        #   @return [Integer]
        required :sequence_number, Integer

        # @!attribute type
        #   The type of the event. Always `response.file_search_call.completed`.
        #
        #   @return [Symbol, :"response.file_search_call.completed"]
        required :type, const: :"response.file_search_call.completed"

        # @!method initialize(item_id:, output_index:, sequence_number:, type: :"response.file_search_call.completed")
        #   Some parameter documentation has been truncated, see
        #   {OpenAI::Models::Responses::ResponseFileSearchCallCompletedEvent} for more
        #   details.
        #
        #   Emitted when a file search call is completed (results found).
        #
        #   @param item_id [String] The ID of the output item that the file search call is initiated.
        #
        #   @param output_index [Integer] The index of the output item that the file search call is initiated.
        #
        #   @param sequence_number [Integer] The sequence number of this event.
        #
        #   @param type [Symbol, :"response.file_search_call.completed"] The type of the event. Always `response.file_search_call.completed`.
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Responses
      # Streaming event emitted when a file search call is initiated.
      # NOTE(review): generated SDK code.
      class ResponseFileSearchCallInProgressEvent < OpenAI::Internal::Type::BaseModel
        # @!attribute item_id
        #   The ID of the output item that the file search call is initiated.
        #
        #   @return [String]
        required :item_id, String

        # @!attribute output_index
        #   The index of the output item that the file search call is initiated.
        #
        #   @return [Integer]
        required :output_index, Integer

        # @!attribute sequence_number
        #   The sequence number of this event.
        #
        #   @return [Integer]
        required :sequence_number, Integer

        # @!attribute type
        #   The type of the event. Always `response.file_search_call.in_progress`.
        #
        #   @return [Symbol, :"response.file_search_call.in_progress"]
        required :type, const: :"response.file_search_call.in_progress"

        # @!method initialize(item_id:, output_index:, sequence_number:, type: :"response.file_search_call.in_progress")
        #   Some parameter documentation has been truncated, see
        #   {OpenAI::Models::Responses::ResponseFileSearchCallInProgressEvent} for more
        #   details.
        #
        #   Emitted when a file search call is initiated.
        #
        #   @param item_id [String] The ID of the output item that the file search call is initiated.
        #
        #   @param output_index [Integer] The index of the output item that the file search call is initiated.
        #
        #   @param sequence_number [Integer] The sequence number of this event.
        #
        #   @param type [Symbol, :"response.file_search_call.in_progress"] The type of the event. Always `response.file_search_call.in_progress`.
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
1
class ResponseFileSearchCallSearchingEvent < OpenAI::Internal::Type::BaseModel
-
# @!attribute item_id
-
# The ID of the output item that the file search call is initiated.
-
#
-
# @return [String]
-
1
required :item_id, String
-
-
# @!attribute output_index
-
# The index of the output item that the file search call is searching.
-
#
-
# @return [Integer]
-
1
required :output_index, Integer
-
-
# @!attribute sequence_number
-
# The sequence number of this event.
-
#
-
# @return [Integer]
-
1
required :sequence_number, Integer
-
-
# @!attribute type
-
# The type of the event. Always `response.file_search_call.searching`.
-
#
-
# @return [Symbol, :"response.file_search_call.searching"]
-
1
required :type, const: :"response.file_search_call.searching"
-
-
# @!method initialize(item_id:, output_index:, sequence_number:, type: :"response.file_search_call.searching")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseFileSearchCallSearchingEvent} for more
-
# details.
-
#
-
# Emitted when a file search is currently searching.
-
#
-
# @param item_id [String] The ID of the output item that the file search call is initiated.
-
#
-
# @param output_index [Integer] The index of the output item that the file search call is searching.
-
#
-
# @param sequence_number [Integer] The sequence number of this event.
-
#
-
# @param type [Symbol, :"response.file_search_call.searching"] The type of the event. Always `response.file_search_call.searching`.
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
1
class ResponseFileSearchToolCall < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the file search tool call.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute queries
-
# The queries used to search for files.
-
#
-
# @return [Array<String>]
-
1
required :queries, OpenAI::Internal::Type::ArrayOf[String]
-
-
# @!attribute status
-
# The status of the file search tool call. One of `in_progress`, `searching`,
-
# `incomplete` or `failed`,
-
#
-
# @return [Symbol, OpenAI::Models::Responses::ResponseFileSearchToolCall::Status]
-
1
required :status, enum: -> { OpenAI::Responses::ResponseFileSearchToolCall::Status }
-
-
# @!attribute type
-
# The type of the file search tool call. Always `file_search_call`.
-
#
-
# @return [Symbol, :file_search_call]
-
1
required :type, const: :file_search_call
-
-
# @!attribute results
-
# The results of the file search tool call.
-
#
-
# @return [Array<OpenAI::Models::Responses::ResponseFileSearchToolCall::Result>, nil]
-
1
optional :results,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseFileSearchToolCall::Result]
-
},
-
nil?: true
-
-
# @!method initialize(id:, queries:, status:, results: nil, type: :file_search_call)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseFileSearchToolCall} for more details.
-
#
-
# The results of a file search tool call. See the
-
# [file search guide](https://platform.openai.com/docs/guides/tools-file-search)
-
# for more information.
-
#
-
# @param id [String] The unique ID of the file search tool call.
-
#
-
# @param queries [Array<String>] The queries used to search for files.
-
#
-
# @param status [Symbol, OpenAI::Models::Responses::ResponseFileSearchToolCall::Status] The status of the file search tool call. One of `in_progress`,
-
#
-
# @param results [Array<OpenAI::Models::Responses::ResponseFileSearchToolCall::Result>, nil] The results of the file search tool call.
-
#
-
# @param type [Symbol, :file_search_call] The type of the file search tool call. Always `file_search_call`.
-
-
# The status of the file search tool call. One of `in_progress`, `searching`,
-
# `incomplete` or `failed`,
-
#
-
# @see OpenAI::Models::Responses::ResponseFileSearchToolCall#status
-
1
module Status
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
IN_PROGRESS = :in_progress
-
1
SEARCHING = :searching
-
1
COMPLETED = :completed
-
1
INCOMPLETE = :incomplete
-
1
FAILED = :failed
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
1
class Result < OpenAI::Internal::Type::BaseModel
-
# @!attribute attributes
-
# Set of 16 key-value pairs that can be attached to an object. This can be useful
-
# for storing additional information about the object in a structured format, and
-
# querying for objects via API or the dashboard. Keys are strings with a maximum
-
# length of 64 characters. Values are strings with a maximum length of 512
-
# characters, booleans, or numbers.
-
#
-
# @return [Hash{Symbol=>String, Float, Boolean}, nil]
-
1
optional :attributes,
-
-> {
-
OpenAI::Internal::Type::HashOf[union: OpenAI::Responses::ResponseFileSearchToolCall::Result::Attribute]
-
},
-
nil?: true
-
-
# @!attribute file_id
-
# The unique ID of the file.
-
#
-
# @return [String, nil]
-
1
optional :file_id, String
-
-
# @!attribute filename
-
# The name of the file.
-
#
-
# @return [String, nil]
-
1
optional :filename, String
-
-
# @!attribute score
-
# The relevance score of the file - a value between 0 and 1.
-
#
-
# @return [Float, nil]
-
1
optional :score, Float
-
-
# @!attribute text
-
# The text that was retrieved from the file.
-
#
-
# @return [String, nil]
-
1
optional :text, String
-
-
# @!method initialize(attributes: nil, file_id: nil, filename: nil, score: nil, text: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseFileSearchToolCall::Result} for more
-
# details.
-
#
-
# @param attributes [Hash{Symbol=>String, Float, Boolean}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
#
-
# @param file_id [String] The unique ID of the file.
-
#
-
# @param filename [String] The name of the file.
-
#
-
# @param score [Float] The relevance score of the file - a value between 0 and 1.
-
#
-
# @param text [String] The text that was retrieved from the file.
-
-
1
module Attribute
-
1
extend OpenAI::Internal::Type::Union
-
-
1
variant String
-
-
1
variant Float
-
-
1
variant OpenAI::Internal::Type::Boolean
-
-
# @!method self.variants
-
# @return [Array(String, Float, Boolean)]
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
# An object specifying the format that the model must output.
-
#
-
# Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
-
# ensures the model will match your supplied JSON schema. Learn more in the
-
# [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
-
#
-
# The default format is `{ "type": "text" }` with no additional options.
-
#
-
# **Not recommended for gpt-4o and newer models:**
-
#
-
# Setting to `{ "type": "json_object" }` enables the older JSON mode, which
-
# ensures the message the model generates is valid JSON. Using `json_schema` is
-
# preferred for models that support it.
-
1
module ResponseFormatTextConfig
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
# Default response format. Used to generate text responses.
-
1
variant :text, -> { OpenAI::ResponseFormatText }
-
-
# JSON Schema response format. Used to generate structured JSON responses.
-
# Learn more about [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs).
-
1
variant :json_schema, -> { OpenAI::Responses::ResponseFormatTextJSONSchemaConfig }
-
-
# An {OpenAI::BaseModel} can be provided and implicitly converted into {OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig}.
-
# See examples for more details.
-
#
-
# Learn more about [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs).
-
1
variant -> { OpenAI::StructuredOutput::JsonSchemaConverter }
-
-
# JSON object response format. An older method of generating JSON responses.
-
# Using `json_schema` is recommended for models that support it. Note that the
-
# model will not generate JSON without a system or user message instructing it
-
# to do so.
-
1
variant :json_object, -> { OpenAI::ResponseFormatJSONObject }
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject)]
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
1
class ResponseFormatTextJSONSchemaConfig < OpenAI::Internal::Type::BaseModel
-
# @!attribute name
-
# The name of the response format. Must be a-z, A-Z, 0-9, or contain underscores
-
# and dashes, with a maximum length of 64.
-
#
-
# @return [String]
-
1
required :name, String
-
-
# @!attribute schema
-
# The schema for the response format, described as a JSON Schema object. Learn how
-
# to build JSON schemas [here](https://json-schema.org/).
-
#
-
# @return [Hash{Symbol=>Object}, OpenAI::StructuredOutput::JsonSchemaConverter]
-
1
required :schema,
-
union: -> {
-
OpenAI::UnionOf[
-
OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown], OpenAI::StructuredOutput::JsonSchemaConverter
-
]
-
}
-
-
# @!attribute type
-
# The type of response format being defined. Always `json_schema`.
-
#
-
# @return [Symbol, :json_schema]
-
1
required :type, const: :json_schema
-
-
# @!attribute description
-
# A description of what the response format is for, used by the model to determine
-
# how to respond in the format.
-
#
-
# @return [String, nil]
-
1
optional :description, String
-
-
# @!attribute strict
-
# Whether to enable strict schema adherence when generating the output. If set to
-
# true, the model will always follow the exact schema defined in the `schema`
-
# field. Only a subset of JSON Schema is supported when `strict` is `true`. To
-
# learn more, read the
-
# [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
-
#
-
# @return [Boolean, nil]
-
1
optional :strict, OpenAI::Internal::Type::Boolean, nil?: true
-
-
# @!method initialize(name:, schema:, description: nil, strict: nil, type: :json_schema)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig} for more
-
# details.
-
#
-
# JSON Schema response format. Used to generate structured JSON responses. Learn
-
# more about
-
# [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs).
-
#
-
# @param name [String] The name of the response format. Must be a-z, A-Z, 0-9, or contain
-
#
-
# @param schema [Hash{Symbol=>Object}, OpenAI::StructuredOutput::JsonSchemaConverter] The schema for the response format, described as a JSON Schema object.
-
#
-
# @param description [String] A description of what the response format is for, used by the model to
-
#
-
# @param strict [Boolean, nil] Whether to enable strict schema adherence when generating the output.
-
#
-
# @param type [Symbol, :json_schema] The type of response format being defined. Always `json_schema`.
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
1
class ResponseFunctionCallArgumentsDeltaEvent < OpenAI::Internal::Type::BaseModel
-
# @!attribute delta
-
# The function-call arguments delta that is added.
-
#
-
# @return [String]
-
1
required :delta, String
-
-
# @!attribute item_id
-
# The ID of the output item that the function-call arguments delta is added to.
-
#
-
# @return [String]
-
1
required :item_id, String
-
-
# @!attribute output_index
-
# The index of the output item that the function-call arguments delta is added to.
-
#
-
# @return [Integer]
-
1
required :output_index, Integer
-
-
# @!attribute sequence_number
-
# The sequence number of this event.
-
#
-
# @return [Integer]
-
1
required :sequence_number, Integer
-
-
# @!attribute type
-
# The type of the event. Always `response.function_call_arguments.delta`.
-
#
-
# @return [Symbol, :"response.function_call_arguments.delta"]
-
1
required :type, const: :"response.function_call_arguments.delta"
-
-
# @!method initialize(delta:, item_id:, output_index:, sequence_number:, type: :"response.function_call_arguments.delta")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent} for more
-
# details.
-
#
-
# Emitted when there is a partial function-call arguments delta.
-
#
-
# @param delta [String] The function-call arguments delta that is added.
-
#
-
# @param item_id [String] The ID of the output item that the function-call arguments delta is added to.
-
#
-
# @param output_index [Integer] The index of the output item that the function-call arguments delta is added to.
-
#
-
# @param sequence_number [Integer] The sequence number of this event.
-
#
-
# @param type [Symbol, :"response.function_call_arguments.delta"] The type of the event. Always `response.function_call_arguments.delta`.
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
1
class ResponseFunctionCallArgumentsDoneEvent < OpenAI::Internal::Type::BaseModel
-
# @!attribute arguments
-
# The function-call arguments.
-
#
-
# @return [String]
-
1
required :arguments, String
-
-
# @!attribute item_id
-
# The ID of the item.
-
#
-
# @return [String]
-
1
required :item_id, String
-
-
# @!attribute output_index
-
# The index of the output item.
-
#
-
# @return [Integer]
-
1
required :output_index, Integer
-
-
# @!attribute sequence_number
-
# The sequence number of this event.
-
#
-
# @return [Integer]
-
1
required :sequence_number, Integer
-
-
# @!attribute type
-
#
-
# @return [Symbol, :"response.function_call_arguments.done"]
-
1
required :type, const: :"response.function_call_arguments.done"
-
-
# @!method initialize(arguments:, item_id:, output_index:, sequence_number:, type: :"response.function_call_arguments.done")
-
# Emitted when function-call arguments are finalized.
-
#
-
# @param arguments [String] The function-call arguments.
-
#
-
# @param item_id [String] The ID of the item.
-
#
-
# @param output_index [Integer] The index of the output item.
-
#
-
# @param sequence_number [Integer] The sequence number of this event.
-
#
-
# @param type [Symbol, :"response.function_call_arguments.done"]
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
1
class ResponseFunctionToolCall < OpenAI::Internal::Type::BaseModel
-
# @!attribute arguments
-
# A JSON string of the arguments to pass to the function.
-
#
-
# @return [String]
-
1
required :arguments, String
-
-
# @!attribute parsed
-
# The parsed contents of the arguments.
-
#
-
# @return [Object, nil]
-
1
required :parsed, OpenAI::StructuredOutput::ParsedJson
-
-
# @!attribute call_id
-
# The unique ID of the function tool call generated by the model.
-
#
-
# @return [String]
-
1
required :call_id, String
-
-
# @!attribute name
-
# The name of the function to run.
-
#
-
# @return [String]
-
1
required :name, String
-
-
# @!attribute type
-
# The type of the function tool call. Always `function_call`.
-
#
-
# @return [Symbol, :function_call]
-
1
required :type, const: :function_call
-
-
# @!attribute id
-
# The unique ID of the function tool call.
-
#
-
# @return [String, nil]
-
1
optional :id, String
-
-
# @!attribute status
-
# The status of the item. One of `in_progress`, `completed`, or `incomplete`.
-
# Populated when items are returned via API.
-
#
-
# @return [Symbol, OpenAI::Models::Responses::ResponseFunctionToolCall::Status, nil]
-
1
optional :status, enum: -> { OpenAI::Responses::ResponseFunctionToolCall::Status }
-
-
# @!method initialize(arguments:, call_id:, name:, id: nil, status: nil, type: :function_call)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseFunctionToolCall} for more details.
-
#
-
# A tool call to run a function. See the
-
# [function calling guide](https://platform.openai.com/docs/guides/function-calling)
-
# for more information.
-
#
-
# @param arguments [String] A JSON string of the arguments to pass to the function.
-
#
-
# @param call_id [String] The unique ID of the function tool call generated by the model.
-
#
-
# @param name [String] The name of the function to run.
-
#
-
# @param id [String] The unique ID of the function tool call.
-
#
-
# @param status [Symbol, OpenAI::Models::Responses::ResponseFunctionToolCall::Status] The status of the item. One of `in_progress`, `completed`, or
-
#
-
# @param type [Symbol, :function_call] The type of the function tool call. Always `function_call`.
-
-
# The status of the item. One of `in_progress`, `completed`, or `incomplete`.
-
# Populated when items are returned via API.
-
#
-
# @see OpenAI::Models::Responses::ResponseFunctionToolCall#status
-
1
module Status
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
IN_PROGRESS = :in_progress
-
1
COMPLETED = :completed
-
1
INCOMPLETE = :incomplete
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
1
class ResponseFunctionToolCallItem < OpenAI::Models::Responses::ResponseFunctionToolCall
-
# @!attribute id
-
# The unique ID of the function tool call.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!method initialize(id:)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseFunctionToolCallItem} for more details.
-
#
-
# A tool call to run a function. See the
-
# [function calling guide](https://platform.openai.com/docs/guides/function-calling)
-
# for more information.
-
#
-
# @param id [String] The unique ID of the function tool call.
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
1
class ResponseFunctionToolCallOutputItem < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the function call tool output.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute call_id
-
# The unique ID of the function tool call generated by the model.
-
#
-
# @return [String]
-
1
required :call_id, String
-
-
# @!attribute output
-
# A JSON string of the output of the function tool call.
-
#
-
# @return [String]
-
1
required :output, String
-
-
# @!attribute type
-
# The type of the function tool call output. Always `function_call_output`.
-
#
-
# @return [Symbol, :function_call_output]
-
1
required :type, const: :function_call_output
-
-
# @!attribute status
-
# The status of the item. One of `in_progress`, `completed`, or `incomplete`.
-
# Populated when items are returned via API.
-
#
-
# @return [Symbol, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem::Status, nil]
-
1
optional :status, enum: -> { OpenAI::Responses::ResponseFunctionToolCallOutputItem::Status }
-
-
# @!method initialize(id:, call_id:, output:, status: nil, type: :function_call_output)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem} for more
-
# details.
-
#
-
# @param id [String] The unique ID of the function call tool output.
-
#
-
# @param call_id [String] The unique ID of the function tool call generated by the model.
-
#
-
# @param output [String] A JSON string of the output of the function tool call.
-
#
-
# @param status [Symbol, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem::Status] The status of the item. One of `in_progress`, `completed`, or
-
#
-
# @param type [Symbol, :function_call_output] The type of the function tool call output. Always `function_call_output`.
-
-
# The status of the item. One of `in_progress`, `completed`, or `incomplete`.
-
# Populated when items are returned via API.
-
#
-
# @see OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem#status
-
1
module Status
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
IN_PROGRESS = :in_progress
-
1
COMPLETED = :completed
-
1
INCOMPLETE = :incomplete
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
1
class ResponseFunctionWebSearch < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the web search tool call.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute action
-
# An object describing the specific action taken in this web search call. Includes
-
# details on how the model used the web (search, open_page, find).
-
#
-
# @return [OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::Search, OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::OpenPage, OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::Find]
-
1
required :action, union: -> { OpenAI::Responses::ResponseFunctionWebSearch::Action }
-
-
# @!attribute status
-
# The status of the web search tool call.
-
#
-
# @return [Symbol, OpenAI::Models::Responses::ResponseFunctionWebSearch::Status]
-
1
required :status, enum: -> { OpenAI::Responses::ResponseFunctionWebSearch::Status }
-
-
# @!attribute type
-
# The type of the web search tool call. Always `web_search_call`.
-
#
-
# @return [Symbol, :web_search_call]
-
1
required :type, const: :web_search_call
-
-
# @!method initialize(id:, action:, status:, type: :web_search_call)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseFunctionWebSearch} for more details.
-
#
-
# The results of a web search tool call. See the
-
# [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for
-
# more information.
-
#
-
# @param id [String] The unique ID of the web search tool call.
-
#
-
# @param action [OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::Search, OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::OpenPage, OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::Find] An object describing the specific action taken in this web search call.
-
#
-
# @param status [Symbol, OpenAI::Models::Responses::ResponseFunctionWebSearch::Status] The status of the web search tool call.
-
#
-
# @param type [Symbol, :web_search_call] The type of the web search tool call. Always `web_search_call`.
-
-
# An object describing the specific action taken in this web search call. Includes
-
# details on how the model used the web (search, open_page, find).
-
#
-
# @see OpenAI::Models::Responses::ResponseFunctionWebSearch#action
-
1
module Action
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
# Action type "search" - Performs a web search query.
-
1
variant :search, -> { OpenAI::Responses::ResponseFunctionWebSearch::Action::Search }
-
-
# Action type "open_page" - Opens a specific URL from search results.
-
1
variant :open_page, -> { OpenAI::Responses::ResponseFunctionWebSearch::Action::OpenPage }
-
-
# Action type "find": Searches for a pattern within a loaded page.
-
1
variant :find, -> { OpenAI::Responses::ResponseFunctionWebSearch::Action::Find }
-
-
1
class Search < OpenAI::Internal::Type::BaseModel
-
# @!attribute query
-
# The search query.
-
#
-
# @return [String]
-
1
required :query, String
-
-
# @!attribute type
-
# The action type.
-
#
-
# @return [Symbol, :search]
-
1
required :type, const: :search
-
-
# @!method initialize(query:, type: :search)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::Search} for more
-
# details.
-
#
-
# Action type "search" - Performs a web search query.
-
#
-
# @param query [String] The search query.
-
#
-
# @param type [Symbol, :search] The action type.
-
end
-
-
1
class OpenPage < OpenAI::Internal::Type::BaseModel
-
# @!attribute type
-
# The action type.
-
#
-
# @return [Symbol, :open_page]
-
1
required :type, const: :open_page
-
-
# @!attribute url
-
# The URL opened by the model.
-
#
-
# @return [String]
-
1
required :url, String
-
-
# @!method initialize(url:, type: :open_page)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::OpenPage} for
-
# more details.
-
#
-
# Action type "open_page" - Opens a specific URL from search results.
-
#
-
# @param url [String] The URL opened by the model.
-
#
-
# @param type [Symbol, :open_page] The action type.
-
end
-
-
1
class Find < OpenAI::Internal::Type::BaseModel
-
# @!attribute pattern
-
# The pattern or text to search for within the page.
-
#
-
# @return [String]
-
1
required :pattern, String
-
-
# @!attribute type
-
# The action type.
-
#
-
# @return [Symbol, :find]
-
1
required :type, const: :find
-
-
# @!attribute url
-
# The URL of the page searched for the pattern.
-
#
-
# @return [String]
-
1
required :url, String
-
-
# @!method initialize(pattern:, url:, type: :find)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::Find} for more
-
# details.
-
#
-
# Action type "find": Searches for a pattern within a loaded page.
-
#
-
# @param pattern [String] The pattern or text to search for within the page.
-
#
-
# @param url [String] The URL of the page searched for the pattern.
-
#
-
# @param type [Symbol, :find] The action type.
-
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::Search, OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::OpenPage, OpenAI::Models::Responses::ResponseFunctionWebSearch::Action::Find)]
-
end
-
-
# The status of the web search tool call.
-
#
-
# @see OpenAI::Models::Responses::ResponseFunctionWebSearch#status
-
1
module Status
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
IN_PROGRESS = :in_progress
-
1
SEARCHING = :searching
-
1
COMPLETED = :completed
-
1
FAILED = :failed
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
1
class ResponseImageGenCallCompletedEvent < OpenAI::Internal::Type::BaseModel
-
# @!attribute item_id
-
# The unique identifier of the image generation item being processed.
-
#
-
# @return [String]
-
1
required :item_id, String
-
-
# @!attribute output_index
-
# The index of the output item in the response's output array.
-
#
-
# @return [Integer]
-
1
required :output_index, Integer
-
-
# @!attribute sequence_number
-
# The sequence number of this event.
-
#
-
# @return [Integer]
-
1
required :sequence_number, Integer
-
-
# @!attribute type
-
# The type of the event. Always 'response.image_generation_call.completed'.
-
#
-
# @return [Symbol, :"response.image_generation_call.completed"]
-
1
required :type, const: :"response.image_generation_call.completed"
-
-
# @!method initialize(item_id:, output_index:, sequence_number:, type: :"response.image_generation_call.completed")
-
# Emitted when an image generation tool call has completed and the final image is
-
# available.
-
#
-
# @param item_id [String] The unique identifier of the image generation item being processed.
-
#
-
# @param output_index [Integer] The index of the output item in the response's output array.
-
#
-
# @param sequence_number [Integer] The sequence number of this event.
-
#
-
# @param type [Symbol, :"response.image_generation_call.completed"] The type of the event. Always 'response.image_generation_call.completed'.
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
1
class ResponseImageGenCallGeneratingEvent < OpenAI::Internal::Type::BaseModel
-
# @!attribute item_id
-
# The unique identifier of the image generation item being processed.
-
#
-
# @return [String]
-
1
required :item_id, String
-
-
# @!attribute output_index
-
# The index of the output item in the response's output array.
-
#
-
# @return [Integer]
-
1
required :output_index, Integer
-
-
# @!attribute sequence_number
-
# The sequence number of the image generation item being processed.
-
#
-
# @return [Integer]
-
1
required :sequence_number, Integer
-
-
# @!attribute type
-
# The type of the event. Always 'response.image_generation_call.generating'.
-
#
-
# @return [Symbol, :"response.image_generation_call.generating"]
-
1
required :type, const: :"response.image_generation_call.generating"
-
-
# @!method initialize(item_id:, output_index:, sequence_number:, type: :"response.image_generation_call.generating")
-
# Emitted when an image generation tool call is actively generating an image
-
# (intermediate state).
-
#
-
# @param item_id [String] The unique identifier of the image generation item being processed.
-
#
-
# @param output_index [Integer] The index of the output item in the response's output array.
-
#
-
# @param sequence_number [Integer] The sequence number of the image generation item being processed.
-
#
-
# @param type [Symbol, :"response.image_generation_call.generating"] The type of the event. Always 'response.image_generation_call.generating'.
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
1
class ResponseImageGenCallInProgressEvent < OpenAI::Internal::Type::BaseModel
-
# @!attribute item_id
-
# The unique identifier of the image generation item being processed.
-
#
-
# @return [String]
-
1
required :item_id, String
-
-
# @!attribute output_index
-
# The index of the output item in the response's output array.
-
#
-
# @return [Integer]
-
1
required :output_index, Integer
-
-
# @!attribute sequence_number
-
# The sequence number of the image generation item being processed.
-
#
-
# @return [Integer]
-
1
required :sequence_number, Integer
-
-
# @!attribute type
-
# The type of the event. Always 'response.image_generation_call.in_progress'.
-
#
-
# @return [Symbol, :"response.image_generation_call.in_progress"]
-
1
required :type, const: :"response.image_generation_call.in_progress"
-
-
# @!method initialize(item_id:, output_index:, sequence_number:, type: :"response.image_generation_call.in_progress")
-
# Emitted when an image generation tool call is in progress.
-
#
-
# @param item_id [String] The unique identifier of the image generation item being processed.
-
#
-
# @param output_index [Integer] The index of the output item in the response's output array.
-
#
-
# @param sequence_number [Integer] The sequence number of the image generation item being processed.
-
#
-
# @param type [Symbol, :"response.image_generation_call.in_progress"] The type of the event. Always 'response.image_generation_call.in_progress'.
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Responses
      # Streaming event model carrying a base64-encoded partial image produced
      # during image generation.
      class ResponseImageGenCallPartialImageEvent < OpenAI::Internal::Type::BaseModel
        # @!attribute item_id
        #   The unique identifier of the image generation item being processed.
        #
        #   @return [String]
        required :item_id, String

        # @!attribute output_index
        #   The index of the output item in the response's output array.
        #
        #   @return [Integer]
        required :output_index, Integer

        # @!attribute partial_image_b64
        #   Base64-encoded partial image data, suitable for rendering as an image.
        #
        #   @return [String]
        required :partial_image_b64, String

        # @!attribute partial_image_index
        #   0-based index for the partial image (backend is 1-based, but this is 0-based for
        #   the user).
        #
        #   @return [Integer]
        required :partial_image_index, Integer

        # @!attribute sequence_number
        #   The sequence number of the image generation item being processed.
        #
        #   @return [Integer]
        required :sequence_number, Integer

        # @!attribute type
        #   The type of the event. Always 'response.image_generation_call.partial_image'.
        #
        #   @return [Symbol, :"response.image_generation_call.partial_image"]
        required :type, const: :"response.image_generation_call.partial_image"

        # @!method initialize(item_id:, output_index:, partial_image_b64:, partial_image_index:, sequence_number:, type: :"response.image_generation_call.partial_image")
        #   Some parameter documentations has been truncated, see
        #   {OpenAI::Models::Responses::ResponseImageGenCallPartialImageEvent} for more
        #   details.
        #
        #   Emitted when a partial image is available during image generation streaming.
        #
        #   @param item_id [String] The unique identifier of the image generation item being processed.
        #
        #   @param output_index [Integer] The index of the output item in the response's output array.
        #
        #   @param partial_image_b64 [String] Base64-encoded partial image data, suitable for rendering as an image.
        #
        #   @param partial_image_index [Integer] 0-based index for the partial image (backend is 1-based, but this is 0-based for
        #
        #   @param sequence_number [Integer] The sequence number of the image generation item being processed.
        #
        #   @param type [Symbol, :"response.image_generation_call.partial_image"] The type of the event. Always 'response.image_generation_call.partial_image'.
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Responses
      # Streaming event model wrapping the in-progress response snapshot.
      class ResponseInProgressEvent < OpenAI::Internal::Type::BaseModel
        # @!attribute response
        #   The response that is in progress.
        #
        #   @return [OpenAI::Models::Responses::Response]
        required :response, -> { OpenAI::Responses::Response }

        # @!attribute sequence_number
        #   The sequence number of this event.
        #
        #   @return [Integer]
        required :sequence_number, Integer

        # @!attribute type
        #   The type of the event. Always `response.in_progress`.
        #
        #   @return [Symbol, :"response.in_progress"]
        required :type, const: :"response.in_progress"

        # @!method initialize(response:, sequence_number:, type: :"response.in_progress")
        #   Some parameter documentations has been truncated, see
        #   {OpenAI::Models::Responses::ResponseInProgressEvent} for more details.
        #
        #   Emitted when the response is in progress.
        #
        #   @param response [OpenAI::Models::Responses::Response] The response that is in progress.
        #
        #   @param sequence_number [Integer] The sequence number of this event.
        #
        #   @param type [Symbol, :"response.in_progress"] The type of the event. Always `response.in_progress`.
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Responses
      # Specify additional output data to include in the model response. Currently
      # supported values are:
      #
      # - `code_interpreter_call.outputs`: Includes the outputs of python code execution
      #   in code interpreter tool call items.
      # - `computer_call_output.output.image_url`: Include image urls from the computer
      #   call output.
      # - `file_search_call.results`: Include the search results of the file search tool
      #   call.
      # - `message.input_image.image_url`: Include image urls from the input message.
      # - `message.output_text.logprobs`: Include logprobs with assistant messages.
      # - `reasoning.encrypted_content`: Includes an encrypted version of reasoning
      #   tokens in reasoning item outputs. This enables reasoning items to be used in
      #   multi-turn conversations when using the Responses API statelessly (like when
      #   the `store` parameter is set to `false`, or when an organization is enrolled
      #   in the zero data retention program).
      module ResponseIncludable
        extend OpenAI::Internal::Type::Enum

        CODE_INTERPRETER_CALL_OUTPUTS = :"code_interpreter_call.outputs"
        COMPUTER_CALL_OUTPUT_OUTPUT_IMAGE_URL = :"computer_call_output.output.image_url"
        FILE_SEARCH_CALL_RESULTS = :"file_search_call.results"
        MESSAGE_INPUT_IMAGE_IMAGE_URL = :"message.input_image.image_url"
        MESSAGE_OUTPUT_TEXT_LOGPROBS = :"message.output_text.logprobs"
        REASONING_ENCRYPTED_CONTENT = :"reasoning.encrypted_content"

        # @!method self.values
        #   @return [Array<Symbol>]
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Responses
      # Streaming event model wrapping a response that finished as incomplete.
      class ResponseIncompleteEvent < OpenAI::Internal::Type::BaseModel
        # @!attribute response
        #   The response that was incomplete.
        #
        #   @return [OpenAI::Models::Responses::Response]
        required :response, -> { OpenAI::Responses::Response }

        # @!attribute sequence_number
        #   The sequence number of this event.
        #
        #   @return [Integer]
        required :sequence_number, Integer

        # @!attribute type
        #   The type of the event. Always `response.incomplete`.
        #
        #   @return [Symbol, :"response.incomplete"]
        required :type, const: :"response.incomplete"

        # @!method initialize(response:, sequence_number:, type: :"response.incomplete")
        #   Some parameter documentations has been truncated, see
        #   {OpenAI::Models::Responses::ResponseIncompleteEvent} for more details.
        #
        #   An event that is emitted when a response finishes as incomplete.
        #
        #   @param response [OpenAI::Models::Responses::Response] The response that was incomplete.
        #
        #   @param sequence_number [Integer] The sequence number of this event.
        #
        #   @param type [Symbol, :"response.incomplete"] The type of the event. Always `response.incomplete`.
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Responses
      # Type alias: a response input is an array of input items.
      #
      # @type [OpenAI::Internal::Type::Converter]
      ResponseInput = OpenAI::Internal::Type::ArrayOf[union: -> { OpenAI::Responses::ResponseInputItem }]
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Responses
      # Model for an audio input item sent to the model.
      class ResponseInputAudio < OpenAI::Internal::Type::BaseModel
        # @!attribute data
        #   Base64-encoded audio data.
        #
        #   @return [String]
        required :data, String

        # @!attribute format_
        #   The format of the audio data. Currently supported formats are `mp3` and `wav`.
        #
        #   @return [Symbol, OpenAI::Models::Responses::ResponseInputAudio::Format]
        required :format_, enum: -> { OpenAI::Responses::ResponseInputAudio::Format }, api_name: :format

        # @!attribute type
        #   The type of the input item. Always `input_audio`.
        #
        #   @return [Symbol, :input_audio]
        required :type, const: :input_audio

        # @!method initialize(data:, format_:, type: :input_audio)
        #   Some parameter documentations has been truncated, see
        #   {OpenAI::Models::Responses::ResponseInputAudio} for more details.
        #
        #   An audio input to the model.
        #
        #   @param data [String] Base64-encoded audio data.
        #
        #   @param format_ [Symbol, OpenAI::Models::Responses::ResponseInputAudio::Format] The format of the audio data. Currently supported formats are `mp3` and
        #
        #   @param type [Symbol, :input_audio] The type of the input item. Always `input_audio`.

        # The format of the audio data. Currently supported formats are `mp3` and `wav`.
        #
        # @see OpenAI::Models::Responses::ResponseInputAudio#format_
        module Format
          extend OpenAI::Internal::Type::Enum

          MP3 = :mp3
          WAV = :wav

          # @!method self.values
          #   @return [Array<Symbol>]
        end
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Responses
      # A text input to the model.
      module ResponseInputContent
        extend OpenAI::Internal::Type::Union

        # Variants are dispatched on the wire `type` field.
        discriminator :type

        # A text input to the model.
        variant :input_text, -> { OpenAI::Responses::ResponseInputText }

        # An image input to the model. Learn about [image inputs](https://platform.openai.com/docs/guides/vision).
        variant :input_image, -> { OpenAI::Responses::ResponseInputImage }

        # A file input to the model.
        variant :input_file, -> { OpenAI::Responses::ResponseInputFile }

        # @!method self.variants
        #   @return [Array(OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile)]
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Responses
      # Model for a file input item sent to the model.
      class ResponseInputFile < OpenAI::Internal::Type::BaseModel
        # @!attribute type
        #   The type of the input item. Always `input_file`.
        #
        #   @return [Symbol, :input_file]
        required :type, const: :input_file

        # @!attribute file_data
        #   The content of the file to be sent to the model.
        #
        #   @return [String, nil]
        optional :file_data, String

        # @!attribute file_id
        #   The ID of the file to be sent to the model.
        #
        #   @return [String, nil]
        optional :file_id, String, nil?: true

        # @!attribute file_url
        #   The URL of the file to be sent to the model.
        #
        #   @return [String, nil]
        optional :file_url, String

        # @!attribute filename
        #   The name of the file to be sent to the model.
        #
        #   @return [String, nil]
        optional :filename, String

        # @!method initialize(file_data: nil, file_id: nil, file_url: nil, filename: nil, type: :input_file)
        #   Some parameter documentations has been truncated, see
        #   {OpenAI::Models::Responses::ResponseInputFile} for more details.
        #
        #   A file input to the model.
        #
        #   @param file_data [String] The content of the file to be sent to the model.
        #
        #   @param file_id [String, nil] The ID of the file to be sent to the model.
        #
        #   @param file_url [String] The URL of the file to be sent to the model.
        #
        #   @param filename [String] The name of the file to be sent to the model.
        #
        #   @param type [Symbol, :input_file] The type of the input item. Always `input_file`.
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Responses
      # Model for an image input item sent to the model.
      class ResponseInputImage < OpenAI::Internal::Type::BaseModel
        # @!attribute detail
        #   The detail level of the image to be sent to the model. One of `high`, `low`, or
        #   `auto`. Defaults to `auto`.
        #
        #   @return [Symbol, OpenAI::Models::Responses::ResponseInputImage::Detail]
        required :detail, enum: -> { OpenAI::Responses::ResponseInputImage::Detail }

        # @!attribute type
        #   The type of the input item. Always `input_image`.
        #
        #   @return [Symbol, :input_image]
        required :type, const: :input_image

        # @!attribute file_id
        #   The ID of the file to be sent to the model.
        #
        #   @return [String, nil]
        optional :file_id, String, nil?: true

        # @!attribute image_url
        #   The URL of the image to be sent to the model. A fully qualified URL or base64
        #   encoded image in a data URL.
        #
        #   @return [String, nil]
        optional :image_url, String, nil?: true

        # @!method initialize(detail:, file_id: nil, image_url: nil, type: :input_image)
        #   Some parameter documentations has been truncated, see
        #   {OpenAI::Models::Responses::ResponseInputImage} for more details.
        #
        #   An image input to the model. Learn about
        #   [image inputs](https://platform.openai.com/docs/guides/vision).
        #
        #   @param detail [Symbol, OpenAI::Models::Responses::ResponseInputImage::Detail] The detail level of the image to be sent to the model. One of `high`, `low`, or
        #
        #   @param file_id [String, nil] The ID of the file to be sent to the model.
        #
        #   @param image_url [String, nil] The URL of the image to be sent to the model. A fully qualified URL or base64 en
        #
        #   @param type [Symbol, :input_image] The type of the input item. Always `input_image`.

        # The detail level of the image to be sent to the model. One of `high`, `low`, or
        # `auto`. Defaults to `auto`.
        #
        # @see OpenAI::Models::Responses::ResponseInputImage#detail
        module Detail
          extend OpenAI::Internal::Type::Enum

          LOW = :low
          HIGH = :high
          AUTO = :auto

          # @!method self.values
          #   @return [Array<Symbol>]
        end
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
# A message input to the model with a role indicating instruction following
-
# hierarchy. Instructions given with the `developer` or `system` role take
-
# precedence over instructions given with the `user` role. Messages with the
-
# `assistant` role are presumed to have been generated by the model in previous
-
# interactions.
-
1
module ResponseInputItem
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
# A message input to the model with a role indicating instruction following
-
# hierarchy. Instructions given with the `developer` or `system` role take
-
# precedence over instructions given with the `user` role. Messages with the
-
# `assistant` role are presumed to have been generated by the model in previous
-
# interactions.
-
3
variant :message, -> { OpenAI::Responses::EasyInputMessage }
-
-
# A message input to the model with a role indicating instruction following
-
# hierarchy. Instructions given with the `developer` or `system` role take
-
# precedence over instructions given with the `user` role.
-
3
variant :message, -> { OpenAI::Responses::ResponseInputItem::Message }
-
-
# An output message from the model.
-
3
variant :message, -> { OpenAI::Responses::ResponseOutputMessage }
-
-
# The results of a file search tool call. See the
-
# [file search guide](https://platform.openai.com/docs/guides/tools-file-search) for more information.
-
3
variant :file_search_call, -> { OpenAI::Responses::ResponseFileSearchToolCall }
-
-
# A tool call to a computer use tool. See the
-
# [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use) for more information.
-
3
variant :computer_call, -> { OpenAI::Responses::ResponseComputerToolCall }
-
-
# The output of a computer tool call.
-
3
variant :computer_call_output, -> { OpenAI::Responses::ResponseInputItem::ComputerCallOutput }
-
-
# The results of a web search tool call. See the
-
# [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for more information.
-
3
variant :web_search_call, -> { OpenAI::Responses::ResponseFunctionWebSearch }
-
-
# A tool call to run a function. See the
-
# [function calling guide](https://platform.openai.com/docs/guides/function-calling) for more information.
-
3
variant :function_call, -> { OpenAI::Responses::ResponseFunctionToolCall }
-
-
# The output of a function tool call.
-
3
variant :function_call_output, -> { OpenAI::Responses::ResponseInputItem::FunctionCallOutput }
-
-
# A description of the chain of thought used by a reasoning model while generating
-
# a response. Be sure to include these items in your `input` to the Responses API
-
# for subsequent turns of a conversation if you are manually
-
# [managing context](https://platform.openai.com/docs/guides/conversation-state).
-
3
variant :reasoning, -> { OpenAI::Responses::ResponseReasoningItem }
-
-
# An image generation request made by the model.
-
3
variant :image_generation_call, -> { OpenAI::Responses::ResponseInputItem::ImageGenerationCall }
-
-
# A tool call to run code.
-
3
variant :code_interpreter_call, -> { OpenAI::Responses::ResponseCodeInterpreterToolCall }
-
-
# A tool call to run a command on the local shell.
-
3
variant :local_shell_call, -> { OpenAI::Responses::ResponseInputItem::LocalShellCall }
-
-
# The output of a local shell tool call.
-
3
variant :local_shell_call_output, -> { OpenAI::Responses::ResponseInputItem::LocalShellCallOutput }
-
-
# A list of tools available on an MCP server.
-
3
variant :mcp_list_tools, -> { OpenAI::Responses::ResponseInputItem::McpListTools }
-
-
# A request for human approval of a tool invocation.
-
3
variant :mcp_approval_request, -> { OpenAI::Responses::ResponseInputItem::McpApprovalRequest }
-
-
# A response to an MCP approval request.
-
3
variant :mcp_approval_response, -> { OpenAI::Responses::ResponseInputItem::McpApprovalResponse }
-
-
# An invocation of a tool on an MCP server.
-
3
variant :mcp_call, -> { OpenAI::Responses::ResponseInputItem::McpCall }
-
-
# An internal identifier for an item to reference.
-
3
variant :item_reference, -> { OpenAI::Responses::ResponseInputItem::ItemReference }
-
-
1
class Message < OpenAI::Internal::Type::BaseModel
  # @!attribute content
  #   A list of one or many input items to the model, containing different content
  #   types.
  #
  #   @return [Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile>]
  required :content,
           -> {
             OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseInputContent]
           }

  # @!attribute role
  #   The role of the message input. One of `user`, `system`, or `developer`.
  #
  #   @return [Symbol, OpenAI::Models::Responses::ResponseInputItem::Message::Role]
  required :role, enum: -> { OpenAI::Responses::ResponseInputItem::Message::Role }

  # @!attribute status
  #   The status of item. One of `in_progress`, `completed`, or `incomplete`.
  #   Populated when items are returned via API.
  #
  #   @return [Symbol, OpenAI::Models::Responses::ResponseInputItem::Message::Status, nil]
  optional :status, enum: -> { OpenAI::Responses::ResponseInputItem::Message::Status }

  # @!attribute type
  #   The type of the message input. Always set to `message`.
  #
  #   @return [Symbol, OpenAI::Models::Responses::ResponseInputItem::Message::Type, nil]
  optional :type, enum: -> { OpenAI::Responses::ResponseInputItem::Message::Type }

  # @!method initialize(content:, role:, status: nil, type: nil)
  #   Some parameter documentations has been truncated, see
  #   {OpenAI::Models::Responses::ResponseInputItem::Message} for more details.
  #
  #   A message input to the model with a role indicating instruction following
  #   hierarchy. Instructions given with the `developer` or `system` role take
  #   precedence over instructions given with the `user` role.
  #
  #   @param content [Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile>] A list of one or many input items to the model, containing different content
  #
  #   @param role [Symbol, OpenAI::Models::Responses::ResponseInputItem::Message::Role] The role of the message input. One of `user`, `system`, or `developer`.
  #
  #   @param status [Symbol, OpenAI::Models::Responses::ResponseInputItem::Message::Status] The status of item. One of `in_progress`, `completed`, or
  #
  #   @param type [Symbol, OpenAI::Models::Responses::ResponseInputItem::Message::Type] The type of the message input. Always set to `message`.

  # The role of the message input. One of `user`, `system`, or `developer`.
  #
  # @see OpenAI::Models::Responses::ResponseInputItem::Message#role
  module Role
    extend OpenAI::Internal::Type::Enum

    USER = :user
    SYSTEM = :system
    DEVELOPER = :developer

    # @!method self.values
    #   @return [Array<Symbol>]
  end

  # The status of item. One of `in_progress`, `completed`, or `incomplete`.
  # Populated when items are returned via API.
  #
  # @see OpenAI::Models::Responses::ResponseInputItem::Message#status
  module Status
    extend OpenAI::Internal::Type::Enum

    IN_PROGRESS = :in_progress
    COMPLETED = :completed
    INCOMPLETE = :incomplete

    # @!method self.values
    #   @return [Array<Symbol>]
  end

  # The type of the message input. Always set to `message`.
  #
  # @see OpenAI::Models::Responses::ResponseInputItem::Message#type
  module Type
    extend OpenAI::Internal::Type::Enum

    MESSAGE = :message

    # @!method self.values
    #   @return [Array<Symbol>]
  end
end
-
-
1
class ComputerCallOutput < OpenAI::Internal::Type::BaseModel
  # @!attribute call_id
  #   The ID of the computer tool call that produced the output.
  #
  #   @return [String]
  required :call_id, String

  # @!attribute output
  #   A computer screenshot image used with the computer use tool.
  #
  #   @return [OpenAI::Models::Responses::ResponseComputerToolCallOutputScreenshot]
  required :output, -> { OpenAI::Responses::ResponseComputerToolCallOutputScreenshot }

  # @!attribute type
  #   The type of the computer tool call output. Always `computer_call_output`.
  #
  #   @return [Symbol, :computer_call_output]
  required :type, const: :computer_call_output

  # @!attribute id
  #   The ID of the computer tool call output.
  #
  #   @return [String, nil]
  optional :id, String, nil?: true

  # @!attribute acknowledged_safety_checks
  #   The safety checks reported by the API that have been acknowledged by the
  #   developer.
  #
  #   @return [Array<OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::AcknowledgedSafetyCheck>, nil]
  optional :acknowledged_safety_checks,
           -> {
             OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseInputItem::ComputerCallOutput::AcknowledgedSafetyCheck]
           },
           nil?: true

  # @!attribute status
  #   The status of the message input. One of `in_progress`, `completed`, or
  #   `incomplete`. Populated when input items are returned via API.
  #
  #   @return [Symbol, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::Status, nil]
  optional :status,
           enum: -> { OpenAI::Responses::ResponseInputItem::ComputerCallOutput::Status },
           nil?: true

  # @!method initialize(call_id:, output:, id: nil, acknowledged_safety_checks: nil, status: nil, type: :computer_call_output)
  #   Some parameter documentations has been truncated, see
  #   {OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput} for more
  #   details.
  #
  #   The output of a computer tool call.
  #
  #   @param call_id [String] The ID of the computer tool call that produced the output.
  #
  #   @param output [OpenAI::Models::Responses::ResponseComputerToolCallOutputScreenshot] A computer screenshot image used with the computer use tool.
  #
  #   @param id [String, nil] The ID of the computer tool call output.
  #
  #   @param acknowledged_safety_checks [Array<OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::AcknowledgedSafetyCheck>, nil] The safety checks reported by the API that have been acknowledged by the develop
  #
  #   @param status [Symbol, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput::Status, nil] The status of the message input. One of `in_progress`, `completed`, or `incomple
  #
  #   @param type [Symbol, :computer_call_output] The type of the computer tool call output. Always `computer_call_output`.

  class AcknowledgedSafetyCheck < OpenAI::Internal::Type::BaseModel
    # @!attribute id
    #   The ID of the pending safety check.
    #
    #   @return [String]
    required :id, String

    # @!attribute code
    #   The type of the pending safety check.
    #
    #   @return [String, nil]
    optional :code, String, nil?: true

    # @!attribute message
    #   Details about the pending safety check.
    #
    #   @return [String, nil]
    optional :message, String, nil?: true

    # @!method initialize(id:, code: nil, message: nil)
    #   A pending safety check for the computer call.
    #
    #   @param id [String] The ID of the pending safety check.
    #
    #   @param code [String, nil] The type of the pending safety check.
    #
    #   @param message [String, nil] Details about the pending safety check.
  end

  # The status of the message input. One of `in_progress`, `completed`, or
  # `incomplete`. Populated when input items are returned via API.
  #
  # @see OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput#status
  module Status
    extend OpenAI::Internal::Type::Enum

    IN_PROGRESS = :in_progress
    COMPLETED = :completed
    INCOMPLETE = :incomplete

    # @!method self.values
    #   @return [Array<Symbol>]
  end
end
-
-
1
class FunctionCallOutput < OpenAI::Internal::Type::BaseModel
  # @!attribute call_id
  #   The unique ID of the function tool call generated by the model.
  #
  #   @return [String]
  required :call_id, String

  # @!attribute output
  #   A JSON string of the output of the function tool call.
  #
  #   @return [String]
  required :output, String

  # @!attribute type
  #   The type of the function tool call output. Always `function_call_output`.
  #
  #   @return [Symbol, :function_call_output]
  required :type, const: :function_call_output

  # @!attribute id
  #   The unique ID of the function tool call output. Populated when this item is
  #   returned via API.
  #
  #   @return [String, nil]
  optional :id, String, nil?: true

  # @!attribute status
  #   The status of the item. One of `in_progress`, `completed`, or `incomplete`.
  #   Populated when items are returned via API.
  #
  #   @return [Symbol, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput::Status, nil]
  optional :status,
           enum: -> { OpenAI::Responses::ResponseInputItem::FunctionCallOutput::Status },
           nil?: true

  # @!method initialize(call_id:, output:, id: nil, status: nil, type: :function_call_output)
  #   Some parameter documentations has been truncated, see
  #   {OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput} for more
  #   details.
  #
  #   The output of a function tool call.
  #
  #   @param call_id [String] The unique ID of the function tool call generated by the model.
  #
  #   @param output [String] A JSON string of the output of the function tool call.
  #
  #   @param id [String, nil] The unique ID of the function tool call output. Populated when this item is retu
  #
  #   @param status [Symbol, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput::Status, nil] The status of the item. One of `in_progress`, `completed`, or `incomplete`. Popu
  #
  #   @param type [Symbol, :function_call_output] The type of the function tool call output. Always `function_call_output`.

  # The status of the item. One of `in_progress`, `completed`, or `incomplete`.
  # Populated when items are returned via API.
  #
  # @see OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput#status
  module Status
    extend OpenAI::Internal::Type::Enum

    IN_PROGRESS = :in_progress
    COMPLETED = :completed
    INCOMPLETE = :incomplete

    # @!method self.values
    #   @return [Array<Symbol>]
  end
end
-
-
1
class ImageGenerationCall < OpenAI::Internal::Type::BaseModel
  # @!attribute id
  #   The unique ID of the image generation call.
  #
  #   @return [String]
  required :id, String

  # @!attribute result
  #   The generated image encoded in base64.
  #
  #   @return [String, nil]
  required :result, String, nil?: true

  # @!attribute status
  #   The status of the image generation call.
  #
  #   @return [Symbol, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall::Status]
  required :status, enum: -> { OpenAI::Responses::ResponseInputItem::ImageGenerationCall::Status }

  # @!attribute type
  #   The type of the image generation call. Always `image_generation_call`.
  #
  #   @return [Symbol, :image_generation_call]
  required :type, const: :image_generation_call

  # @!method initialize(id:, result:, status:, type: :image_generation_call)
  #   Some parameter documentations has been truncated, see
  #   {OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall} for more
  #   details.
  #
  #   An image generation request made by the model.
  #
  #   @param id [String] The unique ID of the image generation call.
  #
  #   @param result [String, nil] The generated image encoded in base64.
  #
  #   @param status [Symbol, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall::Status] The status of the image generation call.
  #
  #   @param type [Symbol, :image_generation_call] The type of the image generation call. Always `image_generation_call`.

  # The status of the image generation call.
  #
  # @see OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall#status
  module Status
    extend OpenAI::Internal::Type::Enum

    IN_PROGRESS = :in_progress
    COMPLETED = :completed
    GENERATING = :generating
    FAILED = :failed

    # @!method self.values
    #   @return [Array<Symbol>]
  end
end
-
-
1
class LocalShellCall < OpenAI::Internal::Type::BaseModel
  # @!attribute id
  #   The unique ID of the local shell call.
  #
  #   @return [String]
  required :id, String

  # @!attribute action
  #   Execute a shell command on the server.
  #
  #   @return [OpenAI::Models::Responses::ResponseInputItem::LocalShellCall::Action]
  required :action, -> { OpenAI::Responses::ResponseInputItem::LocalShellCall::Action }

  # @!attribute call_id
  #   The unique ID of the local shell tool call generated by the model.
  #
  #   @return [String]
  required :call_id, String

  # @!attribute status
  #   The status of the local shell call.
  #
  #   @return [Symbol, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall::Status]
  required :status, enum: -> { OpenAI::Responses::ResponseInputItem::LocalShellCall::Status }

  # @!attribute type
  #   The type of the local shell call. Always `local_shell_call`.
  #
  #   @return [Symbol, :local_shell_call]
  required :type, const: :local_shell_call

  # @!method initialize(id:, action:, call_id:, status:, type: :local_shell_call)
  #   Some parameter documentations has been truncated, see
  #   {OpenAI::Models::Responses::ResponseInputItem::LocalShellCall} for more details.
  #
  #   A tool call to run a command on the local shell.
  #
  #   @param id [String] The unique ID of the local shell call.
  #
  #   @param action [OpenAI::Models::Responses::ResponseInputItem::LocalShellCall::Action] Execute a shell command on the server.
  #
  #   @param call_id [String] The unique ID of the local shell tool call generated by the model.
  #
  #   @param status [Symbol, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall::Status] The status of the local shell call.
  #
  #   @param type [Symbol, :local_shell_call] The type of the local shell call. Always `local_shell_call`.

  # @see OpenAI::Models::Responses::ResponseInputItem::LocalShellCall#action
  class Action < OpenAI::Internal::Type::BaseModel
    # @!attribute command
    #   The command to run.
    #
    #   @return [Array<String>]
    required :command, OpenAI::Internal::Type::ArrayOf[String]

    # @!attribute env
    #   Environment variables to set for the command.
    #
    #   @return [Hash{Symbol=>String}]
    required :env, OpenAI::Internal::Type::HashOf[String]

    # @!attribute type
    #   The type of the local shell action. Always `exec`.
    #
    #   @return [Symbol, :exec]
    required :type, const: :exec

    # @!attribute timeout_ms
    #   Optional timeout in milliseconds for the command.
    #
    #   @return [Integer, nil]
    optional :timeout_ms, Integer, nil?: true

    # @!attribute user
    #   Optional user to run the command as.
    #
    #   @return [String, nil]
    optional :user, String, nil?: true

    # @!attribute working_directory
    #   Optional working directory to run the command in.
    #
    #   @return [String, nil]
    optional :working_directory, String, nil?: true

    # @!method initialize(command:, env:, timeout_ms: nil, user: nil, working_directory: nil, type: :exec)
    #   Some parameter documentations has been truncated, see
    #   {OpenAI::Models::Responses::ResponseInputItem::LocalShellCall::Action} for more
    #   details.
    #
    #   Execute a shell command on the server.
    #
    #   @param command [Array<String>] The command to run.
    #
    #   @param env [Hash{Symbol=>String}] Environment variables to set for the command.
    #
    #   @param timeout_ms [Integer, nil] Optional timeout in milliseconds for the command.
    #
    #   @param user [String, nil] Optional user to run the command as.
    #
    #   @param working_directory [String, nil] Optional working directory to run the command in.
    #
    #   @param type [Symbol, :exec] The type of the local shell action. Always `exec`.
  end

  # The status of the local shell call.
  #
  # @see OpenAI::Models::Responses::ResponseInputItem::LocalShellCall#status
  module Status
    extend OpenAI::Internal::Type::Enum

    IN_PROGRESS = :in_progress
    COMPLETED = :completed
    INCOMPLETE = :incomplete

    # @!method self.values
    #   @return [Array<Symbol>]
  end
end
-
-
1
class LocalShellCallOutput < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the local shell tool call generated by the model.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute output
-
# A JSON string of the output of the local shell tool call.
-
#
-
# @return [String]
-
1
required :output, String
-
-
# @!attribute type
-
# The type of the local shell tool call output. Always `local_shell_call_output`.
-
#
-
# @return [Symbol, :local_shell_call_output]
-
1
required :type, const: :local_shell_call_output
-
-
# @!attribute status
-
# The status of the item. One of `in_progress`, `completed`, or `incomplete`.
-
#
-
# @return [Symbol, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput::Status, nil]
-
1
optional :status,
-
enum: -> { OpenAI::Responses::ResponseInputItem::LocalShellCallOutput::Status },
-
nil?: true
-
-
# @!method initialize(id:, output:, status: nil, type: :local_shell_call_output)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput} for more
-
# details.
-
#
-
# The output of a local shell tool call.
-
#
-
# @param id [String] The unique ID of the local shell tool call generated by the model.
-
#
-
# @param output [String] A JSON string of the output of the local shell tool call.
-
#
-
# @param status [Symbol, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput::Status, nil] The status of the item. One of `in_progress`, `completed`, or `incomplete`.
-
#
-
# @param type [Symbol, :local_shell_call_output] The type of the local shell tool call output. Always `local_shell_call_output`.
-
-
# The status of the item. One of `in_progress`, `completed`, or `incomplete`.
-
#
-
# @see OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput#status
-
1
module Status
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
IN_PROGRESS = :in_progress
-
1
COMPLETED = :completed
-
1
INCOMPLETE = :incomplete
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
-
1
class McpListTools < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the list.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute server_label
-
# The label of the MCP server.
-
#
-
# @return [String]
-
1
required :server_label, String
-
-
# @!attribute tools
-
# The tools available on the server.
-
#
-
# @return [Array<OpenAI::Models::Responses::ResponseInputItem::McpListTools::Tool>]
-
1
required :tools,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseInputItem::McpListTools::Tool]
-
}
-
-
# @!attribute type
-
# The type of the item. Always `mcp_list_tools`.
-
#
-
# @return [Symbol, :mcp_list_tools]
-
1
required :type, const: :mcp_list_tools
-
-
# @!attribute error
-
# Error message if the server could not list tools.
-
#
-
# @return [String, nil]
-
1
optional :error, String, nil?: true
-
-
# @!method initialize(id:, server_label:, tools:, error: nil, type: :mcp_list_tools)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseInputItem::McpListTools} for more details.
-
#
-
# A list of tools available on an MCP server.
-
#
-
# @param id [String] The unique ID of the list.
-
#
-
# @param server_label [String] The label of the MCP server.
-
#
-
# @param tools [Array<OpenAI::Models::Responses::ResponseInputItem::McpListTools::Tool>] The tools available on the server.
-
#
-
# @param error [String, nil] Error message if the server could not list tools.
-
#
-
# @param type [Symbol, :mcp_list_tools] The type of the item. Always `mcp_list_tools`.
-
-
1
class Tool < OpenAI::Internal::Type::BaseModel
-
# @!attribute input_schema
-
# The JSON schema describing the tool's input.
-
#
-
# @return [Object]
-
1
required :input_schema, OpenAI::Internal::Type::Unknown
-
-
# @!attribute name
-
# The name of the tool.
-
#
-
# @return [String]
-
1
required :name, String
-
-
# @!attribute annotations
-
# Additional annotations about the tool.
-
#
-
# @return [Object, nil]
-
1
optional :annotations, OpenAI::Internal::Type::Unknown, nil?: true
-
-
# @!attribute description
-
# The description of the tool.
-
#
-
# @return [String, nil]
-
1
optional :description, String, nil?: true
-
-
# @!method initialize(input_schema:, name:, annotations: nil, description: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseInputItem::McpListTools::Tool} for more
-
# details.
-
#
-
# A tool available on an MCP server.
-
#
-
# @param input_schema [Object] The JSON schema describing the tool's input.
-
#
-
# @param name [String] The name of the tool.
-
#
-
# @param annotations [Object, nil] Additional annotations about the tool.
-
#
-
# @param description [String, nil] The description of the tool.
-
end
-
end
-
-
1
class McpApprovalRequest < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the approval request.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute arguments
-
# A JSON string of arguments for the tool.
-
#
-
# @return [String]
-
1
required :arguments, String
-
-
# @!attribute name
-
# The name of the tool to run.
-
#
-
# @return [String]
-
1
required :name, String
-
-
# @!attribute server_label
-
# The label of the MCP server making the request.
-
#
-
# @return [String]
-
1
required :server_label, String
-
-
# @!attribute type
-
# The type of the item. Always `mcp_approval_request`.
-
#
-
# @return [Symbol, :mcp_approval_request]
-
1
required :type, const: :mcp_approval_request
-
-
# @!method initialize(id:, arguments:, name:, server_label:, type: :mcp_approval_request)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest} for more
-
# details.
-
#
-
# A request for human approval of a tool invocation.
-
#
-
# @param id [String] The unique ID of the approval request.
-
#
-
# @param arguments [String] A JSON string of arguments for the tool.
-
#
-
# @param name [String] The name of the tool to run.
-
#
-
# @param server_label [String] The label of the MCP server making the request.
-
#
-
# @param type [Symbol, :mcp_approval_request] The type of the item. Always `mcp_approval_request`.
-
end
-
-
1
class McpApprovalResponse < OpenAI::Internal::Type::BaseModel
-
# @!attribute approval_request_id
-
# The ID of the approval request being answered.
-
#
-
# @return [String]
-
1
required :approval_request_id, String
-
-
# @!attribute approve
-
# Whether the request was approved.
-
#
-
# @return [Boolean]
-
1
required :approve, OpenAI::Internal::Type::Boolean
-
-
# @!attribute type
-
# The type of the item. Always `mcp_approval_response`.
-
#
-
# @return [Symbol, :mcp_approval_response]
-
1
required :type, const: :mcp_approval_response
-
-
# @!attribute id
-
# The unique ID of the approval response
-
#
-
# @return [String, nil]
-
1
optional :id, String, nil?: true
-
-
# @!attribute reason
-
# Optional reason for the decision.
-
#
-
# @return [String, nil]
-
1
optional :reason, String, nil?: true
-
-
# @!method initialize(approval_request_id:, approve:, id: nil, reason: nil, type: :mcp_approval_response)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse} for more
-
# details.
-
#
-
# A response to an MCP approval request.
-
#
-
# @param approval_request_id [String] The ID of the approval request being answered.
-
#
-
# @param approve [Boolean] Whether the request was approved.
-
#
-
# @param id [String, nil] The unique ID of the approval response
-
#
-
# @param reason [String, nil] Optional reason for the decision.
-
#
-
# @param type [Symbol, :mcp_approval_response] The type of the item. Always `mcp_approval_response`.
-
end
-
-
1
class McpCall < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the tool call.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute arguments
-
# A JSON string of the arguments passed to the tool.
-
#
-
# @return [String]
-
1
required :arguments, String
-
-
# @!attribute name
-
# The name of the tool that was run.
-
#
-
# @return [String]
-
1
required :name, String
-
-
# @!attribute server_label
-
# The label of the MCP server running the tool.
-
#
-
# @return [String]
-
1
required :server_label, String
-
-
# @!attribute type
-
# The type of the item. Always `mcp_call`.
-
#
-
# @return [Symbol, :mcp_call]
-
1
required :type, const: :mcp_call
-
-
# @!attribute error
-
# The error from the tool call, if any.
-
#
-
# @return [String, nil]
-
1
optional :error, String, nil?: true
-
-
# @!attribute output
-
# The output from the tool call.
-
#
-
# @return [String, nil]
-
1
optional :output, String, nil?: true
-
-
# @!method initialize(id:, arguments:, name:, server_label:, error: nil, output: nil, type: :mcp_call)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseInputItem::McpCall} for more details.
-
#
-
# An invocation of a tool on an MCP server.
-
#
-
# @param id [String] The unique ID of the tool call.
-
#
-
# @param arguments [String] A JSON string of the arguments passed to the tool.
-
#
-
# @param name [String] The name of the tool that was run.
-
#
-
# @param server_label [String] The label of the MCP server running the tool.
-
#
-
# @param error [String, nil] The error from the tool call, if any.
-
#
-
# @param output [String, nil] The output from the tool call.
-
#
-
# @param type [Symbol, :mcp_call] The type of the item. Always `mcp_call`.
-
end
-
-
1
class ItemReference < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The ID of the item to reference.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute type
-
# The type of item to reference. Always `item_reference`.
-
#
-
# @return [Symbol, OpenAI::Models::Responses::ResponseInputItem::ItemReference::Type, nil]
-
1
optional :type, enum: -> { OpenAI::Responses::ResponseInputItem::ItemReference::Type }, nil?: true
-
-
# @!method initialize(id:, type: nil)
-
# An internal identifier for an item to reference.
-
#
-
# @param id [String] The ID of the item to reference.
-
#
-
# @param type [Symbol, OpenAI::Models::Responses::ResponseInputItem::ItemReference::Type, nil] The type of item to reference. Always `item_reference`.
-
-
# The type of item to reference. Always `item_reference`.
-
#
-
# @see OpenAI::Models::Responses::ResponseInputItem::ItemReference#type
-
1
module Type
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
ITEM_REFERENCE = :item_reference
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference)]
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
# @type [OpenAI::Internal::Type::Converter]
-
ResponseInputMessageContentList =
-
1
OpenAI::Internal::Type::ArrayOf[union: -> { OpenAI::Responses::ResponseInputContent }]
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
1
class ResponseInputMessageItem < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the message input.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute content
-
# A list of one or many input items to the model, containing different content
-
# types.
-
#
-
# @return [Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile>]
-
1
required :content,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseInputContent]
-
}
-
-
# @!attribute role
-
# The role of the message input. One of `user`, `system`, or `developer`.
-
#
-
# @return [Symbol, OpenAI::Models::Responses::ResponseInputMessageItem::Role]
-
1
required :role, enum: -> { OpenAI::Responses::ResponseInputMessageItem::Role }
-
-
# @!attribute status
-
# The status of item. One of `in_progress`, `completed`, or `incomplete`.
-
# Populated when items are returned via API.
-
#
-
# @return [Symbol, OpenAI::Models::Responses::ResponseInputMessageItem::Status, nil]
-
1
optional :status, enum: -> { OpenAI::Responses::ResponseInputMessageItem::Status }
-
-
# @!attribute type
-
# The type of the message input. Always set to `message`.
-
#
-
# @return [Symbol, OpenAI::Models::Responses::ResponseInputMessageItem::Type, nil]
-
1
optional :type, enum: -> { OpenAI::Responses::ResponseInputMessageItem::Type }
-
-
# @!method initialize(id:, content:, role:, status: nil, type: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseInputMessageItem} for more details.
-
#
-
# @param id [String] The unique ID of the message input.
-
#
-
# @param content [Array<OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile>] A list of one or many input items to the model, containing different content
-
#
-
# @param role [Symbol, OpenAI::Models::Responses::ResponseInputMessageItem::Role] The role of the message input. One of `user`, `system`, or `developer`.
-
#
-
# @param status [Symbol, OpenAI::Models::Responses::ResponseInputMessageItem::Status] The status of item. One of `in_progress`, `completed`, or
-
#
-
# @param type [Symbol, OpenAI::Models::Responses::ResponseInputMessageItem::Type] The type of the message input. Always set to `message`.
-
-
# The role of the message input. One of `user`, `system`, or `developer`.
-
#
-
# @see OpenAI::Models::Responses::ResponseInputMessageItem#role
-
1
module Role
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
USER = :user
-
1
SYSTEM = :system
-
1
DEVELOPER = :developer
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
# The status of item. One of `in_progress`, `completed`, or `incomplete`.
-
# Populated when items are returned via API.
-
#
-
# @see OpenAI::Models::Responses::ResponseInputMessageItem#status
-
1
module Status
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
IN_PROGRESS = :in_progress
-
1
COMPLETED = :completed
-
1
INCOMPLETE = :incomplete
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
# The type of the message input. Always set to `message`.
-
#
-
# @see OpenAI::Models::Responses::ResponseInputMessageItem#type
-
1
module Type
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
MESSAGE = :message
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
1
class ResponseInputText < OpenAI::Internal::Type::BaseModel
-
# @!attribute text
-
# The text input to the model.
-
#
-
# @return [String]
-
1
required :text, String
-
-
# @!attribute type
-
# The type of the input item. Always `input_text`.
-
#
-
# @return [Symbol, :input_text]
-
1
required :type, const: :input_text
-
-
# @!method initialize(text:, type: :input_text)
-
# A text input to the model.
-
#
-
# @param text [String] The text input to the model.
-
#
-
# @param type [Symbol, :input_text] The type of the input item. Always `input_text`.
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
# Content item used to generate a response.
-
1
module ResponseItem
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
1
variant :message, -> { OpenAI::Responses::ResponseInputMessageItem }
-
-
# An output message from the model.
-
1
variant :message, -> { OpenAI::Responses::ResponseOutputMessage }
-
-
# The results of a file search tool call. See the
-
# [file search guide](https://platform.openai.com/docs/guides/tools-file-search) for more information.
-
1
variant :file_search_call, -> { OpenAI::Responses::ResponseFileSearchToolCall }
-
-
# A tool call to a computer use tool. See the
-
# [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use) for more information.
-
1
variant :computer_call, -> { OpenAI::Responses::ResponseComputerToolCall }
-
-
1
variant :computer_call_output, -> { OpenAI::Responses::ResponseComputerToolCallOutputItem }
-
-
# The results of a web search tool call. See the
-
# [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for more information.
-
1
variant :web_search_call, -> { OpenAI::Responses::ResponseFunctionWebSearch }
-
-
# A tool call to run a function. See the
-
# [function calling guide](https://platform.openai.com/docs/guides/function-calling) for more information.
-
1
variant :function_call, -> { OpenAI::Responses::ResponseFunctionToolCallItem }
-
-
1
variant :function_call_output, -> { OpenAI::Responses::ResponseFunctionToolCallOutputItem }
-
-
# An image generation request made by the model.
-
1
variant :image_generation_call, -> { OpenAI::Responses::ResponseItem::ImageGenerationCall }
-
-
# A tool call to run code.
-
1
variant :code_interpreter_call, -> { OpenAI::Responses::ResponseCodeInterpreterToolCall }
-
-
# A tool call to run a command on the local shell.
-
1
variant :local_shell_call, -> { OpenAI::Responses::ResponseItem::LocalShellCall }
-
-
# The output of a local shell tool call.
-
1
variant :local_shell_call_output, -> { OpenAI::Responses::ResponseItem::LocalShellCallOutput }
-
-
# A list of tools available on an MCP server.
-
1
variant :mcp_list_tools, -> { OpenAI::Responses::ResponseItem::McpListTools }
-
-
# A request for human approval of a tool invocation.
-
1
variant :mcp_approval_request, -> { OpenAI::Responses::ResponseItem::McpApprovalRequest }
-
-
# A response to an MCP approval request.
-
1
variant :mcp_approval_response, -> { OpenAI::Responses::ResponseItem::McpApprovalResponse }
-
-
# An invocation of a tool on an MCP server.
-
1
variant :mcp_call, -> { OpenAI::Responses::ResponseItem::McpCall }
-
-
1
class ImageGenerationCall < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the image generation call.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute result
-
# The generated image encoded in base64.
-
#
-
# @return [String, nil]
-
1
required :result, String, nil?: true
-
-
# @!attribute status
-
# The status of the image generation call.
-
#
-
# @return [Symbol, OpenAI::Models::Responses::ResponseItem::ImageGenerationCall::Status]
-
1
required :status, enum: -> { OpenAI::Responses::ResponseItem::ImageGenerationCall::Status }
-
-
# @!attribute type
-
# The type of the image generation call. Always `image_generation_call`.
-
#
-
# @return [Symbol, :image_generation_call]
-
1
required :type, const: :image_generation_call
-
-
# @!method initialize(id:, result:, status:, type: :image_generation_call)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseItem::ImageGenerationCall} for more details.
-
#
-
# An image generation request made by the model.
-
#
-
# @param id [String] The unique ID of the image generation call.
-
#
-
# @param result [String, nil] The generated image encoded in base64.
-
#
-
# @param status [Symbol, OpenAI::Models::Responses::ResponseItem::ImageGenerationCall::Status] The status of the image generation call.
-
#
-
# @param type [Symbol, :image_generation_call] The type of the image generation call. Always `image_generation_call`.
-
-
# The status of the image generation call.
-
#
-
# @see OpenAI::Models::Responses::ResponseItem::ImageGenerationCall#status
-
1
module Status
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
IN_PROGRESS = :in_progress
-
1
COMPLETED = :completed
-
1
GENERATING = :generating
-
1
FAILED = :failed
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
-
1
class LocalShellCall < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the local shell call.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute action
-
# Execute a shell command on the server.
-
#
-
# @return [OpenAI::Models::Responses::ResponseItem::LocalShellCall::Action]
-
1
required :action, -> { OpenAI::Responses::ResponseItem::LocalShellCall::Action }
-
-
# @!attribute call_id
-
# The unique ID of the local shell tool call generated by the model.
-
#
-
# @return [String]
-
1
required :call_id, String
-
-
# @!attribute status
-
# The status of the local shell call.
-
#
-
# @return [Symbol, OpenAI::Models::Responses::ResponseItem::LocalShellCall::Status]
-
1
required :status, enum: -> { OpenAI::Responses::ResponseItem::LocalShellCall::Status }
-
-
# @!attribute type
-
# The type of the local shell call. Always `local_shell_call`.
-
#
-
# @return [Symbol, :local_shell_call]
-
1
required :type, const: :local_shell_call
-
-
# @!method initialize(id:, action:, call_id:, status:, type: :local_shell_call)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseItem::LocalShellCall} for more details.
-
#
-
# A tool call to run a command on the local shell.
-
#
-
# @param id [String] The unique ID of the local shell call.
-
#
-
# @param action [OpenAI::Models::Responses::ResponseItem::LocalShellCall::Action] Execute a shell command on the server.
-
#
-
# @param call_id [String] The unique ID of the local shell tool call generated by the model.
-
#
-
# @param status [Symbol, OpenAI::Models::Responses::ResponseItem::LocalShellCall::Status] The status of the local shell call.
-
#
-
# @param type [Symbol, :local_shell_call] The type of the local shell call. Always `local_shell_call`.
-
-
# @see OpenAI::Models::Responses::ResponseItem::LocalShellCall#action
-
1
class Action < OpenAI::Internal::Type::BaseModel
-
# @!attribute command
-
# The command to run.
-
#
-
# @return [Array<String>]
-
1
required :command, OpenAI::Internal::Type::ArrayOf[String]
-
-
# @!attribute env
-
# Environment variables to set for the command.
-
#
-
# @return [Hash{Symbol=>String}]
-
1
required :env, OpenAI::Internal::Type::HashOf[String]
-
-
# @!attribute type
-
# The type of the local shell action. Always `exec`.
-
#
-
# @return [Symbol, :exec]
-
1
required :type, const: :exec
-
-
# @!attribute timeout_ms
-
# Optional timeout in milliseconds for the command.
-
#
-
# @return [Integer, nil]
-
1
optional :timeout_ms, Integer, nil?: true
-
-
# @!attribute user
-
# Optional user to run the command as.
-
#
-
# @return [String, nil]
-
1
optional :user, String, nil?: true
-
-
# @!attribute working_directory
-
# Optional working directory to run the command in.
-
#
-
# @return [String, nil]
-
1
optional :working_directory, String, nil?: true
-
-
# @!method initialize(command:, env:, timeout_ms: nil, user: nil, working_directory: nil, type: :exec)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseItem::LocalShellCall::Action} for more
-
# details.
-
#
-
# Execute a shell command on the server.
-
#
-
# @param command [Array<String>] The command to run.
-
#
-
# @param env [Hash{Symbol=>String}] Environment variables to set for the command.
-
#
-
# @param timeout_ms [Integer, nil] Optional timeout in milliseconds for the command.
-
#
-
# @param user [String, nil] Optional user to run the command as.
-
#
-
# @param working_directory [String, nil] Optional working directory to run the command in.
-
#
-
# @param type [Symbol, :exec] The type of the local shell action. Always `exec`.
-
end
-
-
# The status of the local shell call.
-
#
-
# @see OpenAI::Models::Responses::ResponseItem::LocalShellCall#status
-
1
module Status
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
IN_PROGRESS = :in_progress
-
1
COMPLETED = :completed
-
1
INCOMPLETE = :incomplete
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
-
1
class LocalShellCallOutput < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the local shell tool call generated by the model.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute output
-
# A JSON string of the output of the local shell tool call.
-
#
-
# @return [String]
-
1
required :output, String
-
-
# @!attribute type
-
# The type of the local shell tool call output. Always `local_shell_call_output`.
-
#
-
# @return [Symbol, :local_shell_call_output]
-
1
required :type, const: :local_shell_call_output
-
-
# @!attribute status
-
# The status of the item. One of `in_progress`, `completed`, or `incomplete`.
-
#
-
# @return [Symbol, OpenAI::Models::Responses::ResponseItem::LocalShellCallOutput::Status, nil]
-
1
optional :status,
-
enum: -> {
-
OpenAI::Responses::ResponseItem::LocalShellCallOutput::Status
-
},
-
nil?: true
-
-
# @!method initialize(id:, output:, status: nil, type: :local_shell_call_output)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseItem::LocalShellCallOutput} for more
-
# details.
-
#
-
# The output of a local shell tool call.
-
#
-
# @param id [String] The unique ID of the local shell tool call generated by the model.
-
#
-
# @param output [String] A JSON string of the output of the local shell tool call.
-
#
-
# @param status [Symbol, OpenAI::Models::Responses::ResponseItem::LocalShellCallOutput::Status, nil] The status of the item. One of `in_progress`, `completed`, or `incomplete`.
-
#
-
# @param type [Symbol, :local_shell_call_output] The type of the local shell tool call output. Always `local_shell_call_output`.
-
-
# The status of the item. One of `in_progress`, `completed`, or `incomplete`.
-
#
-
# @see OpenAI::Models::Responses::ResponseItem::LocalShellCallOutput#status
-
1
module Status
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
IN_PROGRESS = :in_progress
-
1
COMPLETED = :completed
-
1
INCOMPLETE = :incomplete
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
-
1
class McpListTools < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the list.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute server_label
-
# The label of the MCP server.
-
#
-
# @return [String]
-
1
required :server_label, String
-
-
# @!attribute tools
-
# The tools available on the server.
-
#
-
# @return [Array<OpenAI::Models::Responses::ResponseItem::McpListTools::Tool>]
-
1
required :tools,
-
-> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseItem::McpListTools::Tool] }
-
-
# @!attribute type
-
# The type of the item. Always `mcp_list_tools`.
-
#
-
# @return [Symbol, :mcp_list_tools]
-
1
required :type, const: :mcp_list_tools
-
-
# @!attribute error
-
# Error message if the server could not list tools.
-
#
-
# @return [String, nil]
-
1
optional :error, String, nil?: true
-
-
# @!method initialize(id:, server_label:, tools:, error: nil, type: :mcp_list_tools)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseItem::McpListTools} for more details.
-
#
-
# A list of tools available on an MCP server.
-
#
-
# @param id [String] The unique ID of the list.
-
#
-
# @param server_label [String] The label of the MCP server.
-
#
-
# @param tools [Array<OpenAI::Models::Responses::ResponseItem::McpListTools::Tool>] The tools available on the server.
-
#
-
# @param error [String, nil] Error message if the server could not list tools.
-
#
-
# @param type [Symbol, :mcp_list_tools] The type of the item. Always `mcp_list_tools`.
-
-
1
class Tool < OpenAI::Internal::Type::BaseModel
-
# @!attribute input_schema
-
# The JSON schema describing the tool's input.
-
#
-
# @return [Object]
-
1
required :input_schema, OpenAI::Internal::Type::Unknown
-
-
# @!attribute name
-
# The name of the tool.
-
#
-
# @return [String]
-
1
required :name, String
-
-
# @!attribute annotations
-
# Additional annotations about the tool.
-
#
-
# @return [Object, nil]
-
1
optional :annotations, OpenAI::Internal::Type::Unknown, nil?: true
-
-
# @!attribute description
-
# The description of the tool.
-
#
-
# @return [String, nil]
-
1
optional :description, String, nil?: true
-
-
# @!method initialize(input_schema:, name:, annotations: nil, description: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseItem::McpListTools::Tool} for more details.
-
#
-
# A tool available on an MCP server.
-
#
-
# @param input_schema [Object] The JSON schema describing the tool's input.
-
#
-
# @param name [String] The name of the tool.
-
#
-
# @param annotations [Object, nil] Additional annotations about the tool.
-
#
-
# @param description [String, nil] The description of the tool.
-
end
-
end
-
-
1
class McpApprovalRequest < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the approval request.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute arguments
-
# A JSON string of arguments for the tool.
-
#
-
# @return [String]
-
1
required :arguments, String
-
-
# @!attribute name
-
# The name of the tool to run.
-
#
-
# @return [String]
-
1
required :name, String
-
-
# @!attribute server_label
-
# The label of the MCP server making the request.
-
#
-
# @return [String]
-
1
required :server_label, String
-
-
# @!attribute type
-
# The type of the item. Always `mcp_approval_request`.
-
#
-
# @return [Symbol, :mcp_approval_request]
-
1
required :type, const: :mcp_approval_request
-
-
# @!method initialize(id:, arguments:, name:, server_label:, type: :mcp_approval_request)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseItem::McpApprovalRequest} for more details.
-
#
-
# A request for human approval of a tool invocation.
-
#
-
# @param id [String] The unique ID of the approval request.
-
#
-
# @param arguments [String] A JSON string of arguments for the tool.
-
#
-
# @param name [String] The name of the tool to run.
-
#
-
# @param server_label [String] The label of the MCP server making the request.
-
#
-
# @param type [Symbol, :mcp_approval_request] The type of the item. Always `mcp_approval_request`.
-
end
-
-
1
class McpApprovalResponse < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the approval response
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute approval_request_id
-
# The ID of the approval request being answered.
-
#
-
# @return [String]
-
1
required :approval_request_id, String
-
-
# @!attribute approve
-
# Whether the request was approved.
-
#
-
# @return [Boolean]
-
1
required :approve, OpenAI::Internal::Type::Boolean
-
-
# @!attribute type
-
# The type of the item. Always `mcp_approval_response`.
-
#
-
# @return [Symbol, :mcp_approval_response]
-
1
required :type, const: :mcp_approval_response
-
-
# @!attribute reason
-
# Optional reason for the decision.
-
#
-
# @return [String, nil]
-
1
optional :reason, String, nil?: true
-
-
# @!method initialize(id:, approval_request_id:, approve:, reason: nil, type: :mcp_approval_response)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseItem::McpApprovalResponse} for more details.
-
#
-
# A response to an MCP approval request.
-
#
-
# @param id [String] The unique ID of the approval response
-
#
-
# @param approval_request_id [String] The ID of the approval request being answered.
-
#
-
# @param approve [Boolean] Whether the request was approved.
-
#
-
# @param reason [String, nil] Optional reason for the decision.
-
#
-
# @param type [Symbol, :mcp_approval_response] The type of the item. Always `mcp_approval_response`.
-
end
-
-
1
        # An invocation of a tool on an MCP server.
        class McpCall < OpenAI::Internal::Type::BaseModel
          # @!attribute id
          # The unique ID of the tool call.
          #
          # @return [String]
          required :id, String

          # @!attribute arguments
          # A JSON string of the arguments passed to the tool.
          #
          # @return [String]
          required :arguments, String

          # @!attribute name
          # The name of the tool that was run.
          #
          # @return [String]
          required :name, String

          # @!attribute server_label
          # The label of the MCP server running the tool.
          #
          # @return [String]
          required :server_label, String

          # @!attribute type
          # The type of the item. Always `mcp_call`.
          #
          # @return [Symbol, :mcp_call]
          required :type, const: :mcp_call

          # @!attribute error
          # The error from the tool call, if any.
          #
          # @return [String, nil]
          optional :error, String, nil?: true

          # @!attribute output
          # The output from the tool call.
          #
          # @return [String, nil]
          optional :output, String, nil?: true

          # @!method initialize(id:, arguments:, name:, server_label:, error: nil, output: nil, type: :mcp_call)
          # Some parameter documentation has been truncated, see
          # {OpenAI::Models::Responses::ResponseItem::McpCall} for more details.
          #
          # An invocation of a tool on an MCP server.
          #
          # @param id [String] The unique ID of the tool call.
          #
          # @param arguments [String] A JSON string of the arguments passed to the tool.
          #
          # @param name [String] The name of the tool that was run.
          #
          # @param server_label [String] The label of the MCP server running the tool.
          #
          # @param error [String, nil] The error from the tool call, if any.
          #
          # @param output [String, nil] The output from the tool call.
          #
          # @param type [Symbol, :mcp_call] The type of the item. Always `mcp_call`.
        end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Responses::ResponseInputMessageItem, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCallItem, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem, OpenAI::Models::Responses::ResponseItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseItem::LocalShellCall, OpenAI::Models::Responses::ResponseItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseItem::McpListTools, OpenAI::Models::Responses::ResponseItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseItem::McpCall)]
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Responses
      # A paginated list of Response items.
      class ResponseItemList < OpenAI::Internal::Type::BaseModel
        # @!attribute data
        # A list of items used to generate this response.
        #
        # @return [Array<OpenAI::Models::Responses::ResponseInputMessageItem, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCallItem, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem, OpenAI::Models::Responses::ResponseItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseItem::LocalShellCall, OpenAI::Models::Responses::ResponseItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseItem::McpListTools, OpenAI::Models::Responses::ResponseItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseItem::McpCall>]
        required :data, -> { OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseItem] }

        # @!attribute first_id
        # The ID of the first item in the list.
        #
        # @return [String]
        required :first_id, String

        # @!attribute has_more
        # Whether there are more items available.
        #
        # @return [Boolean]
        required :has_more, OpenAI::Internal::Type::Boolean

        # @!attribute last_id
        # The ID of the last item in the list.
        #
        # @return [String]
        required :last_id, String

        # @!attribute object
        # The type of object returned, must be `list`.
        #
        # @return [Symbol, :list]
        required :object, const: :list

        # @!method initialize(data:, first_id:, has_more:, last_id:, object: :list)
        # A list of Response items.
        #
        # @param data [Array<OpenAI::Models::Responses::ResponseInputMessageItem, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseComputerToolCallOutputItem, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCallItem, OpenAI::Models::Responses::ResponseFunctionToolCallOutputItem, OpenAI::Models::Responses::ResponseItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseItem::LocalShellCall, OpenAI::Models::Responses::ResponseItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseItem::McpListTools, OpenAI::Models::Responses::ResponseItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseItem::McpCall>] A list of items used to generate this response.
        #
        # @param first_id [String] The ID of the first item in the list.
        #
        # @param has_more [Boolean] Whether there are more items available.
        #
        # @param last_id [String] The ID of the last item in the list.
        #
        # @param object [Symbol, :list] The type of object returned, must be `list`.
      end
    end

    # Top-level convenience alias for {Responses::ResponseItemList}.
    ResponseItemList = Responses::ResponseItemList
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Responses
      # Streaming event: a partial (delta) update to the JSON arguments of an
      # in-flight MCP tool call.
      class ResponseMcpCallArgumentsDeltaEvent < OpenAI::Internal::Type::BaseModel
        # @!attribute delta
        # A JSON string containing the partial update to the arguments for the MCP tool
        # call.
        #
        # @return [String]
        required :delta, String

        # @!attribute item_id
        # The unique identifier of the MCP tool call item being processed.
        #
        # @return [String]
        required :item_id, String

        # @!attribute output_index
        # The index of the output item in the response's output array.
        #
        # @return [Integer]
        required :output_index, Integer

        # @!attribute sequence_number
        # The sequence number of this event.
        #
        # @return [Integer]
        required :sequence_number, Integer

        # @!attribute type
        # The type of the event. Always 'response.mcp_call_arguments.delta'.
        #
        # @return [Symbol, :"response.mcp_call_arguments.delta"]
        required :type, const: :"response.mcp_call_arguments.delta"

        # @!method initialize(delta:, item_id:, output_index:, sequence_number:, type: :"response.mcp_call_arguments.delta")
        # Some parameter documentation has been truncated, see
        # {OpenAI::Models::Responses::ResponseMcpCallArgumentsDeltaEvent} for more
        # details.
        #
        # Emitted when there is a delta (partial update) to the arguments of an MCP tool
        # call.
        #
        # @param delta [String] A JSON string containing the partial update to the arguments for the MCP tool ca
        #
        # @param item_id [String] The unique identifier of the MCP tool call item being processed.
        #
        # @param output_index [Integer] The index of the output item in the response's output array.
        #
        # @param sequence_number [Integer] The sequence number of this event.
        #
        # @param type [Symbol, :"response.mcp_call_arguments.delta"] The type of the event. Always 'response.mcp_call_arguments.delta'.
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Responses
      # Streaming event: the arguments of an MCP tool call are complete and final.
      class ResponseMcpCallArgumentsDoneEvent < OpenAI::Internal::Type::BaseModel
        # @!attribute arguments
        # A JSON string containing the finalized arguments for the MCP tool call.
        #
        # @return [String]
        required :arguments, String

        # @!attribute item_id
        # The unique identifier of the MCP tool call item being processed.
        #
        # @return [String]
        required :item_id, String

        # @!attribute output_index
        # The index of the output item in the response's output array.
        #
        # @return [Integer]
        required :output_index, Integer

        # @!attribute sequence_number
        # The sequence number of this event.
        #
        # @return [Integer]
        required :sequence_number, Integer

        # @!attribute type
        # The type of the event. Always 'response.mcp_call_arguments.done'.
        #
        # @return [Symbol, :"response.mcp_call_arguments.done"]
        required :type, const: :"response.mcp_call_arguments.done"

        # @!method initialize(arguments:, item_id:, output_index:, sequence_number:, type: :"response.mcp_call_arguments.done")
        # Some parameter documentation has been truncated, see
        # {OpenAI::Models::Responses::ResponseMcpCallArgumentsDoneEvent} for more details.
        #
        # Emitted when the arguments for an MCP tool call are finalized.
        #
        # @param arguments [String] A JSON string containing the finalized arguments for the MCP tool call.
        #
        # @param item_id [String] The unique identifier of the MCP tool call item being processed.
        #
        # @param output_index [Integer] The index of the output item in the response's output array.
        #
        # @param sequence_number [Integer] The sequence number of this event.
        #
        # @param type [Symbol, :"response.mcp_call_arguments.done"] The type of the event. Always 'response.mcp_call_arguments.done'.
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Responses
      # Streaming event: an MCP tool call finished successfully.
      class ResponseMcpCallCompletedEvent < OpenAI::Internal::Type::BaseModel
        # @!attribute item_id
        # The ID of the MCP tool call item that completed.
        #
        # @return [String]
        required :item_id, String

        # @!attribute output_index
        # The index of the output item that completed.
        #
        # @return [Integer]
        required :output_index, Integer

        # @!attribute sequence_number
        # The sequence number of this event.
        #
        # @return [Integer]
        required :sequence_number, Integer

        # @!attribute type
        # The type of the event. Always 'response.mcp_call.completed'.
        #
        # @return [Symbol, :"response.mcp_call.completed"]
        required :type, const: :"response.mcp_call.completed"

        # @!method initialize(item_id:, output_index:, sequence_number:, type: :"response.mcp_call.completed")
        # Emitted when an MCP tool call has completed successfully.
        #
        # @param item_id [String] The ID of the MCP tool call item that completed.
        #
        # @param output_index [Integer] The index of the output item that completed.
        #
        # @param sequence_number [Integer] The sequence number of this event.
        #
        # @param type [Symbol, :"response.mcp_call.completed"] The type of the event. Always 'response.mcp_call.completed'.
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Responses
      # Streaming event: an MCP tool call ended in failure.
      class ResponseMcpCallFailedEvent < OpenAI::Internal::Type::BaseModel
        # @!attribute item_id
        # The ID of the MCP tool call item that failed.
        #
        # @return [String]
        required :item_id, String

        # @!attribute output_index
        # The index of the output item that failed.
        #
        # @return [Integer]
        required :output_index, Integer

        # @!attribute sequence_number
        # The sequence number of this event.
        #
        # @return [Integer]
        required :sequence_number, Integer

        # @!attribute type
        # The type of the event. Always 'response.mcp_call.failed'.
        #
        # @return [Symbol, :"response.mcp_call.failed"]
        required :type, const: :"response.mcp_call.failed"

        # @!method initialize(item_id:, output_index:, sequence_number:, type: :"response.mcp_call.failed")
        # Emitted when an MCP tool call has failed.
        #
        # @param item_id [String] The ID of the MCP tool call item that failed.
        #
        # @param output_index [Integer] The index of the output item that failed.
        #
        # @param sequence_number [Integer] The sequence number of this event.
        #
        # @param type [Symbol, :"response.mcp_call.failed"] The type of the event. Always 'response.mcp_call.failed'.
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Responses
      # Streaming event: an MCP tool call has started and is still running.
      class ResponseMcpCallInProgressEvent < OpenAI::Internal::Type::BaseModel
        # @!attribute item_id
        # The unique identifier of the MCP tool call item being processed.
        #
        # @return [String]
        required :item_id, String

        # @!attribute output_index
        # The index of the output item in the response's output array.
        #
        # @return [Integer]
        required :output_index, Integer

        # @!attribute sequence_number
        # The sequence number of this event.
        #
        # @return [Integer]
        required :sequence_number, Integer

        # @!attribute type
        # The type of the event. Always 'response.mcp_call.in_progress'.
        #
        # @return [Symbol, :"response.mcp_call.in_progress"]
        required :type, const: :"response.mcp_call.in_progress"

        # @!method initialize(item_id:, output_index:, sequence_number:, type: :"response.mcp_call.in_progress")
        # Emitted when an MCP tool call is in progress.
        #
        # @param item_id [String] The unique identifier of the MCP tool call item being processed.
        #
        # @param output_index [Integer] The index of the output item in the response's output array.
        #
        # @param sequence_number [Integer] The sequence number of this event.
        #
        # @param type [Symbol, :"response.mcp_call.in_progress"] The type of the event. Always 'response.mcp_call.in_progress'.
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Responses
      # Streaming event: the MCP server's tool list was retrieved successfully.
      class ResponseMcpListToolsCompletedEvent < OpenAI::Internal::Type::BaseModel
        # @!attribute item_id
        # The ID of the MCP tool call item that produced this output.
        #
        # @return [String]
        required :item_id, String

        # @!attribute output_index
        # The index of the output item that was processed.
        #
        # @return [Integer]
        required :output_index, Integer

        # @!attribute sequence_number
        # The sequence number of this event.
        #
        # @return [Integer]
        required :sequence_number, Integer

        # @!attribute type
        # The type of the event. Always 'response.mcp_list_tools.completed'.
        #
        # @return [Symbol, :"response.mcp_list_tools.completed"]
        required :type, const: :"response.mcp_list_tools.completed"

        # @!method initialize(item_id:, output_index:, sequence_number:, type: :"response.mcp_list_tools.completed")
        # Emitted when the list of available MCP tools has been successfully retrieved.
        #
        # @param item_id [String] The ID of the MCP tool call item that produced this output.
        #
        # @param output_index [Integer] The index of the output item that was processed.
        #
        # @param sequence_number [Integer] The sequence number of this event.
        #
        # @param type [Symbol, :"response.mcp_list_tools.completed"] The type of the event. Always 'response.mcp_list_tools.completed'.
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Responses
      # Streaming event: retrieving the MCP server's tool list failed.
      class ResponseMcpListToolsFailedEvent < OpenAI::Internal::Type::BaseModel
        # @!attribute item_id
        # The ID of the MCP tool call item that failed.
        #
        # @return [String]
        required :item_id, String

        # @!attribute output_index
        # The index of the output item that failed.
        #
        # @return [Integer]
        required :output_index, Integer

        # @!attribute sequence_number
        # The sequence number of this event.
        #
        # @return [Integer]
        required :sequence_number, Integer

        # @!attribute type
        # The type of the event. Always 'response.mcp_list_tools.failed'.
        #
        # @return [Symbol, :"response.mcp_list_tools.failed"]
        required :type, const: :"response.mcp_list_tools.failed"

        # @!method initialize(item_id:, output_index:, sequence_number:, type: :"response.mcp_list_tools.failed")
        # Emitted when the attempt to list available MCP tools has failed.
        #
        # @param item_id [String] The ID of the MCP tool call item that failed.
        #
        # @param output_index [Integer] The index of the output item that failed.
        #
        # @param sequence_number [Integer] The sequence number of this event.
        #
        # @param type [Symbol, :"response.mcp_list_tools.failed"] The type of the event. Always 'response.mcp_list_tools.failed'.
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Responses
      # Streaming event: the MCP server's tool list is being retrieved.
      class ResponseMcpListToolsInProgressEvent < OpenAI::Internal::Type::BaseModel
        # @!attribute item_id
        # The ID of the MCP tool call item that is being processed.
        #
        # @return [String]
        required :item_id, String

        # @!attribute output_index
        # The index of the output item that is being processed.
        #
        # @return [Integer]
        required :output_index, Integer

        # @!attribute sequence_number
        # The sequence number of this event.
        #
        # @return [Integer]
        required :sequence_number, Integer

        # @!attribute type
        # The type of the event. Always 'response.mcp_list_tools.in_progress'.
        #
        # @return [Symbol, :"response.mcp_list_tools.in_progress"]
        required :type, const: :"response.mcp_list_tools.in_progress"

        # @!method initialize(item_id:, output_index:, sequence_number:, type: :"response.mcp_list_tools.in_progress")
        # Emitted when the system is in the process of retrieving the list of available
        # MCP tools.
        #
        # @param item_id [String] The ID of the MCP tool call item that is being processed.
        #
        # @param output_index [Integer] The index of the output item that is being processed.
        #
        # @param sequence_number [Integer] The sequence number of this event.
        #
        # @param type [Symbol, :"response.mcp_list_tools.in_progress"] The type of the event. Always 'response.mcp_list_tools.in_progress'.
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Responses
      # An audio output item produced by the model (base64 data plus transcript).
      class ResponseOutputAudio < OpenAI::Internal::Type::BaseModel
        # @!attribute data
        # Base64-encoded audio data from the model.
        #
        # @return [String]
        required :data, String

        # @!attribute transcript
        # The transcript of the audio data from the model.
        #
        # @return [String]
        required :transcript, String

        # @!attribute type
        # The type of the output audio. Always `output_audio`.
        #
        # @return [Symbol, :output_audio]
        required :type, const: :output_audio

        # @!method initialize(data:, transcript:, type: :output_audio)
        # Some parameter documentation has been truncated, see
        # {OpenAI::Models::Responses::ResponseOutputAudio} for more details.
        #
        # An audio output from the model.
        #
        # @param data [String] Base64-encoded audio data from the model.
        #
        # @param transcript [String] The transcript of the audio data from the model.
        #
        # @param type [Symbol, :output_audio] The type of the output audio. Always `output_audio`.
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
# An output message from the model.
-
1
module ResponseOutputItem
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
# An output message from the model.
-
1
variant :message, -> { OpenAI::Responses::ResponseOutputMessage }
-
-
# The results of a file search tool call. See the
-
# [file search guide](https://platform.openai.com/docs/guides/tools-file-search) for more information.
-
1
variant :file_search_call, -> { OpenAI::Responses::ResponseFileSearchToolCall }
-
-
# A tool call to run a function. See the
-
# [function calling guide](https://platform.openai.com/docs/guides/function-calling) for more information.
-
1
variant :function_call, -> { OpenAI::Responses::ResponseFunctionToolCall }
-
-
# The results of a web search tool call. See the
-
# [web search guide](https://platform.openai.com/docs/guides/tools-web-search) for more information.
-
1
variant :web_search_call, -> { OpenAI::Responses::ResponseFunctionWebSearch }
-
-
# A tool call to a computer use tool. See the
-
# [computer use guide](https://platform.openai.com/docs/guides/tools-computer-use) for more information.
-
1
variant :computer_call, -> { OpenAI::Responses::ResponseComputerToolCall }
-
-
# A description of the chain of thought used by a reasoning model while generating
-
# a response. Be sure to include these items in your `input` to the Responses API
-
# for subsequent turns of a conversation if you are manually
-
# [managing context](https://platform.openai.com/docs/guides/conversation-state).
-
1
variant :reasoning, -> { OpenAI::Responses::ResponseReasoningItem }
-
-
# An image generation request made by the model.
-
1
variant :image_generation_call, -> { OpenAI::Responses::ResponseOutputItem::ImageGenerationCall }
-
-
# A tool call to run code.
-
1
variant :code_interpreter_call, -> { OpenAI::Responses::ResponseCodeInterpreterToolCall }
-
-
# A tool call to run a command on the local shell.
-
1
variant :local_shell_call, -> { OpenAI::Responses::ResponseOutputItem::LocalShellCall }
-
-
# An invocation of a tool on an MCP server.
-
1
variant :mcp_call, -> { OpenAI::Responses::ResponseOutputItem::McpCall }
-
-
# A list of tools available on an MCP server.
-
1
variant :mcp_list_tools, -> { OpenAI::Responses::ResponseOutputItem::McpListTools }
-
-
# A request for human approval of a tool invocation.
-
1
variant :mcp_approval_request, -> { OpenAI::Responses::ResponseOutputItem::McpApprovalRequest }
-
-
1
        # An image generation request made by the model.
        class ImageGenerationCall < OpenAI::Internal::Type::BaseModel
          # @!attribute id
          # The unique ID of the image generation call.
          #
          # @return [String]
          required :id, String

          # @!attribute result
          # The generated image encoded in base64.
          #
          # @return [String, nil]
          required :result, String, nil?: true

          # @!attribute status
          # The status of the image generation call.
          #
          # @return [Symbol, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall::Status]
          required :status, enum: -> { OpenAI::Responses::ResponseOutputItem::ImageGenerationCall::Status }

          # @!attribute type
          # The type of the image generation call. Always `image_generation_call`.
          #
          # @return [Symbol, :image_generation_call]
          required :type, const: :image_generation_call

          # @!method initialize(id:, result:, status:, type: :image_generation_call)
          # Some parameter documentation has been truncated, see
          # {OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall} for more
          # details.
          #
          # An image generation request made by the model.
          #
          # @param id [String] The unique ID of the image generation call.
          #
          # @param result [String, nil] The generated image encoded in base64.
          #
          # @param status [Symbol, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall::Status] The status of the image generation call.
          #
          # @param type [Symbol, :image_generation_call] The type of the image generation call. Always `image_generation_call`.

          # The status of the image generation call.
          #
          # @see OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall#status
          module Status
            extend OpenAI::Internal::Type::Enum

            IN_PROGRESS = :in_progress
            COMPLETED = :completed
            GENERATING = :generating
            FAILED = :failed

            # @!method self.values
            # @return [Array<Symbol>]
          end
        end
-
-
1
        # A tool call to run a command on the local shell.
        class LocalShellCall < OpenAI::Internal::Type::BaseModel
          # @!attribute id
          # The unique ID of the local shell call.
          #
          # @return [String]
          required :id, String

          # @!attribute action
          # Execute a shell command on the server.
          #
          # @return [OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall::Action]
          required :action, -> { OpenAI::Responses::ResponseOutputItem::LocalShellCall::Action }

          # @!attribute call_id
          # The unique ID of the local shell tool call generated by the model.
          #
          # @return [String]
          required :call_id, String

          # @!attribute status
          # The status of the local shell call.
          #
          # @return [Symbol, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall::Status]
          required :status, enum: -> { OpenAI::Responses::ResponseOutputItem::LocalShellCall::Status }

          # @!attribute type
          # The type of the local shell call. Always `local_shell_call`.
          #
          # @return [Symbol, :local_shell_call]
          required :type, const: :local_shell_call

          # @!method initialize(id:, action:, call_id:, status:, type: :local_shell_call)
          # Some parameter documentation has been truncated, see
          # {OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall} for more
          # details.
          #
          # A tool call to run a command on the local shell.
          #
          # @param id [String] The unique ID of the local shell call.
          #
          # @param action [OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall::Action] Execute a shell command on the server.
          #
          # @param call_id [String] The unique ID of the local shell tool call generated by the model.
          #
          # @param status [Symbol, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall::Status] The status of the local shell call.
          #
          # @param type [Symbol, :local_shell_call] The type of the local shell call. Always `local_shell_call`.

          # The shell command to execute, with its environment and optional limits.
          #
          # @see OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall#action
          class Action < OpenAI::Internal::Type::BaseModel
            # @!attribute command
            # The command to run.
            #
            # @return [Array<String>]
            required :command, OpenAI::Internal::Type::ArrayOf[String]

            # @!attribute env
            # Environment variables to set for the command.
            #
            # @return [Hash{Symbol=>String}]
            required :env, OpenAI::Internal::Type::HashOf[String]

            # @!attribute type
            # The type of the local shell action. Always `exec`.
            #
            # @return [Symbol, :exec]
            required :type, const: :exec

            # @!attribute timeout_ms
            # Optional timeout in milliseconds for the command.
            #
            # @return [Integer, nil]
            optional :timeout_ms, Integer, nil?: true

            # @!attribute user
            # Optional user to run the command as.
            #
            # @return [String, nil]
            optional :user, String, nil?: true

            # @!attribute working_directory
            # Optional working directory to run the command in.
            #
            # @return [String, nil]
            optional :working_directory, String, nil?: true

            # @!method initialize(command:, env:, timeout_ms: nil, user: nil, working_directory: nil, type: :exec)
            # Some parameter documentation has been truncated, see
            # {OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall::Action} for more
            # details.
            #
            # Execute a shell command on the server.
            #
            # @param command [Array<String>] The command to run.
            #
            # @param env [Hash{Symbol=>String}] Environment variables to set for the command.
            #
            # @param timeout_ms [Integer, nil] Optional timeout in milliseconds for the command.
            #
            # @param user [String, nil] Optional user to run the command as.
            #
            # @param working_directory [String, nil] Optional working directory to run the command in.
            #
            # @param type [Symbol, :exec] The type of the local shell action. Always `exec`.
          end

          # The status of the local shell call.
          #
          # @see OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall#status
          module Status
            extend OpenAI::Internal::Type::Enum

            IN_PROGRESS = :in_progress
            COMPLETED = :completed
            INCOMPLETE = :incomplete

            # @!method self.values
            # @return [Array<Symbol>]
          end
        end
-
-
1
        # An invocation of a tool on an MCP server.
        class McpCall < OpenAI::Internal::Type::BaseModel
          # @!attribute id
          # The unique ID of the tool call.
          #
          # @return [String]
          required :id, String

          # @!attribute arguments
          # A JSON string of the arguments passed to the tool.
          #
          # @return [String]
          required :arguments, String

          # @!attribute name
          # The name of the tool that was run.
          #
          # @return [String]
          required :name, String

          # @!attribute server_label
          # The label of the MCP server running the tool.
          #
          # @return [String]
          required :server_label, String

          # @!attribute type
          # The type of the item. Always `mcp_call`.
          #
          # @return [Symbol, :mcp_call]
          required :type, const: :mcp_call

          # @!attribute error
          # The error from the tool call, if any.
          #
          # @return [String, nil]
          optional :error, String, nil?: true

          # @!attribute output
          # The output from the tool call.
          #
          # @return [String, nil]
          optional :output, String, nil?: true

          # @!method initialize(id:, arguments:, name:, server_label:, error: nil, output: nil, type: :mcp_call)
          # Some parameter documentation has been truncated, see
          # {OpenAI::Models::Responses::ResponseOutputItem::McpCall} for more details.
          #
          # An invocation of a tool on an MCP server.
          #
          # @param id [String] The unique ID of the tool call.
          #
          # @param arguments [String] A JSON string of the arguments passed to the tool.
          #
          # @param name [String] The name of the tool that was run.
          #
          # @param server_label [String] The label of the MCP server running the tool.
          #
          # @param error [String, nil] The error from the tool call, if any.
          #
          # @param output [String, nil] The output from the tool call.
          #
          # @param type [Symbol, :mcp_call] The type of the item. Always `mcp_call`.
        end
-
-
1
        # A list of tools available on an MCP server.
        class McpListTools < OpenAI::Internal::Type::BaseModel
          # @!attribute id
          # The unique ID of the list.
          #
          # @return [String]
          required :id, String

          # @!attribute server_label
          # The label of the MCP server.
          #
          # @return [String]
          required :server_label, String

          # @!attribute tools
          # The tools available on the server.
          #
          # @return [Array<OpenAI::Models::Responses::ResponseOutputItem::McpListTools::Tool>]
          required :tools,
                   -> {
                     OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseOutputItem::McpListTools::Tool]
                   }

          # @!attribute type
          # The type of the item. Always `mcp_list_tools`.
          #
          # @return [Symbol, :mcp_list_tools]
          required :type, const: :mcp_list_tools

          # @!attribute error
          # Error message if the server could not list tools.
          #
          # @return [String, nil]
          optional :error, String, nil?: true

          # @!method initialize(id:, server_label:, tools:, error: nil, type: :mcp_list_tools)
          # Some parameter documentation has been truncated, see
          # {OpenAI::Models::Responses::ResponseOutputItem::McpListTools} for more details.
          #
          # A list of tools available on an MCP server.
          #
          # @param id [String] The unique ID of the list.
          #
          # @param server_label [String] The label of the MCP server.
          #
          # @param tools [Array<OpenAI::Models::Responses::ResponseOutputItem::McpListTools::Tool>] The tools available on the server.
          #
          # @param error [String, nil] Error message if the server could not list tools.
          #
          # @param type [Symbol, :mcp_list_tools] The type of the item. Always `mcp_list_tools`.

          # A single tool advertised by an MCP server.
          class Tool < OpenAI::Internal::Type::BaseModel
            # @!attribute input_schema
            # The JSON schema describing the tool's input.
            #
            # @return [Object]
            required :input_schema, OpenAI::Internal::Type::Unknown

            # @!attribute name
            # The name of the tool.
            #
            # @return [String]
            required :name, String

            # @!attribute annotations
            # Additional annotations about the tool.
            #
            # @return [Object, nil]
            optional :annotations, OpenAI::Internal::Type::Unknown, nil?: true

            # @!attribute description
            # The description of the tool.
            #
            # @return [String, nil]
            optional :description, String, nil?: true

            # @!method initialize(input_schema:, name:, annotations: nil, description: nil)
            # Some parameter documentation has been truncated, see
            # {OpenAI::Models::Responses::ResponseOutputItem::McpListTools::Tool} for more
            # details.
            #
            # A tool available on an MCP server.
            #
            # @param input_schema [Object] The JSON schema describing the tool's input.
            #
            # @param name [String] The name of the tool.
            #
            # @param annotations [Object, nil] Additional annotations about the tool.
            #
            # @param description [String, nil] The description of the tool.
          end
        end
-
-
1
class McpApprovalRequest < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the approval request.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute arguments
-
# A JSON string of arguments for the tool.
-
#
-
# @return [String]
-
1
required :arguments, String
-
-
# @!attribute name
-
# The name of the tool to run.
-
#
-
# @return [String]
-
1
required :name, String
-
-
# @!attribute server_label
-
# The label of the MCP server making the request.
-
#
-
# @return [String]
-
1
required :server_label, String
-
-
# @!attribute type
-
# The type of the item. Always `mcp_approval_request`.
-
#
-
# @return [Symbol, :mcp_approval_request]
-
1
required :type, const: :mcp_approval_request
-
-
# @!method initialize(id:, arguments:, name:, server_label:, type: :mcp_approval_request)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest} for more
-
# details.
-
#
-
# A request for human approval of a tool invocation.
-
#
-
# @param id [String] The unique ID of the approval request.
-
#
-
# @param arguments [String] A JSON string of arguments for the tool.
-
#
-
# @param name [String] The name of the tool to run.
-
#
-
# @param server_label [String] The label of the MCP server making the request.
-
#
-
# @param type [Symbol, :mcp_approval_request] The type of the item. Always `mcp_approval_request`.
-
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest)]
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
1
class ResponseOutputItemAddedEvent < OpenAI::Internal::Type::BaseModel
-
# @!attribute item
-
# The output item that was added.
-
#
-
# @return [OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest]
-
1
required :item, union: -> { OpenAI::Responses::ResponseOutputItem }
-
-
# @!attribute output_index
-
# The index of the output item that was added.
-
#
-
# @return [Integer]
-
1
required :output_index, Integer
-
-
# @!attribute sequence_number
-
# The sequence number of this event.
-
#
-
# @return [Integer]
-
1
required :sequence_number, Integer
-
-
# @!attribute type
-
# The type of the event. Always `response.output_item.added`.
-
#
-
# @return [Symbol, :"response.output_item.added"]
-
1
required :type, const: :"response.output_item.added"
-
-
# @!method initialize(item:, output_index:, sequence_number:, type: :"response.output_item.added")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseOutputItemAddedEvent} for more details.
-
#
-
# Emitted when a new output item is added.
-
#
-
# @param item [OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest] The output item that was added.
-
#
-
# @param output_index [Integer] The index of the output item that was added.
-
#
-
# @param sequence_number [Integer] The sequence number of this event.
-
#
-
# @param type [Symbol, :"response.output_item.added"] The type of the event. Always `response.output_item.added`.
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
1
class ResponseOutputItemDoneEvent < OpenAI::Internal::Type::BaseModel
-
# @!attribute item
-
# The output item that was marked done.
-
#
-
# @return [OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest]
-
1
required :item, union: -> { OpenAI::Responses::ResponseOutputItem }
-
-
# @!attribute output_index
-
# The index of the output item that was marked done.
-
#
-
# @return [Integer]
-
1
required :output_index, Integer
-
-
# @!attribute sequence_number
-
# The sequence number of this event.
-
#
-
# @return [Integer]
-
1
required :sequence_number, Integer
-
-
# @!attribute type
-
# The type of the event. Always `response.output_item.done`.
-
#
-
# @return [Symbol, :"response.output_item.done"]
-
1
required :type, const: :"response.output_item.done"
-
-
# @!method initialize(item:, output_index:, sequence_number:, type: :"response.output_item.done")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseOutputItemDoneEvent} for more details.
-
#
-
# Emitted when an output item is marked done.
-
#
-
# @param item [OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseOutputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseOutputItem::LocalShellCall, OpenAI::Models::Responses::ResponseOutputItem::McpCall, OpenAI::Models::Responses::ResponseOutputItem::McpListTools, OpenAI::Models::Responses::ResponseOutputItem::McpApprovalRequest] The output item that was marked done.
-
#
-
# @param output_index [Integer] The index of the output item that was marked done.
-
#
-
# @param sequence_number [Integer] The sequence number of this event.
-
#
-
# @param type [Symbol, :"response.output_item.done"] The type of the event. Always `response.output_item.done`.
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
1
class ResponseOutputMessage < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the output message.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute content
-
# The content of the output message.
-
#
-
# @return [Array<OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal>]
-
1
required :content,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseOutputMessage::Content]
-
}
-
-
# @!attribute role
-
# The role of the output message. Always `assistant`.
-
#
-
# @return [Symbol, :assistant]
-
1
required :role, const: :assistant
-
-
# @!attribute status
-
# The status of the message input. One of `in_progress`, `completed`, or
-
# `incomplete`. Populated when input items are returned via API.
-
#
-
# @return [Symbol, OpenAI::Models::Responses::ResponseOutputMessage::Status]
-
1
required :status, enum: -> { OpenAI::Responses::ResponseOutputMessage::Status }
-
-
# @!attribute type
-
# The type of the output message. Always `message`.
-
#
-
# @return [Symbol, :message]
-
1
required :type, const: :message
-
-
# @!method initialize(id:, content:, status:, role: :assistant, type: :message)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseOutputMessage} for more details.
-
#
-
# An output message from the model.
-
#
-
# @param id [String] The unique ID of the output message.
-
#
-
# @param content [Array<OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal>] The content of the output message.
-
#
-
# @param status [Symbol, OpenAI::Models::Responses::ResponseOutputMessage::Status] The status of the message input. One of `in_progress`, `completed`, or
-
#
-
# @param role [Symbol, :assistant] The role of the output message. Always `assistant`.
-
#
-
# @param type [Symbol, :message] The type of the output message. Always `message`.
-
-
# A text output from the model.
-
1
module Content
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
# A text output from the model.
-
1
variant :output_text, -> { OpenAI::Responses::ResponseOutputText }
-
-
# A refusal from the model.
-
1
variant :refusal, -> { OpenAI::Responses::ResponseOutputRefusal }
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Responses::ResponseOutputText, OpenAI::Models::Responses::ResponseOutputRefusal)]
-
end
-
-
# The status of the message input. One of `in_progress`, `completed`, or
-
# `incomplete`. Populated when input items are returned via API.
-
#
-
# @see OpenAI::Models::Responses::ResponseOutputMessage#status
-
1
module Status
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
IN_PROGRESS = :in_progress
-
1
COMPLETED = :completed
-
1
INCOMPLETE = :incomplete
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
1
class ResponseOutputRefusal < OpenAI::Internal::Type::BaseModel
-
# @!attribute refusal
-
# The refusal explanation from the model.
-
#
-
# @return [String]
-
1
required :refusal, String
-
-
# @!attribute type
-
# The type of the refusal. Always `refusal`.
-
#
-
# @return [Symbol, :refusal]
-
1
required :type, const: :refusal
-
-
# @!method initialize(refusal:, type: :refusal)
-
# A refusal from the model.
-
#
-
# @param refusal [String] The refusal explanation from the model.
-
#
-
# @param type [Symbol, :refusal] The type of the refusal. Always `refusal`.
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
1
class ResponseOutputText < OpenAI::Internal::Type::BaseModel
-
# @!attribute annotations
-
# The annotations of the text output.
-
#
-
# @return [Array<OpenAI::Models::Responses::ResponseOutputText::Annotation::FileCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::URLCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::ContainerFileCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::FilePath>]
-
1
required :annotations,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[union: OpenAI::Responses::ResponseOutputText::Annotation]
-
}
-
-
# @!attribute text
-
# The text output from the model.
-
#
-
# @return [String]
-
1
required :text, String
-
-
# @!attribute parsed
-
# The parsed contents of the output, if JSON schema is specified.
-
#
-
# @return [Object, nil]
-
1
optional :parsed, OpenAI::StructuredOutput::ParsedJson
-
-
# @!attribute type
-
# The type of the output text. Always `output_text`.
-
#
-
# @return [Symbol, :output_text]
-
1
required :type, const: :output_text
-
-
# @!attribute logprobs
-
#
-
# @return [Array<OpenAI::Models::Responses::ResponseOutputText::Logprob>, nil]
-
1
optional :logprobs,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseOutputText::Logprob]
-
}
-
-
# @!method initialize(annotations:, text:, logprobs: nil, type: :output_text)
-
# A text output from the model.
-
#
-
# @param annotations [Array<OpenAI::Models::Responses::ResponseOutputText::Annotation::FileCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::URLCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::ContainerFileCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::FilePath>] The annotations of the text output.
-
#
-
# @param text [String] The text output from the model.
-
#
-
# @param logprobs [Array<OpenAI::Models::Responses::ResponseOutputText::Logprob>]
-
#
-
# @param type [Symbol, :output_text] The type of the output text. Always `output_text`.
-
-
# A citation to a file.
-
1
module Annotation
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
# A citation to a file.
-
1
variant :file_citation, -> { OpenAI::Responses::ResponseOutputText::Annotation::FileCitation }
-
-
# A citation for a web resource used to generate a model response.
-
1
variant :url_citation, -> { OpenAI::Responses::ResponseOutputText::Annotation::URLCitation }
-
-
# A citation for a container file used to generate a model response.
-
1
variant :container_file_citation,
-
-> { OpenAI::Responses::ResponseOutputText::Annotation::ContainerFileCitation }
-
-
# A path to a file.
-
1
variant :file_path, -> { OpenAI::Responses::ResponseOutputText::Annotation::FilePath }
-
-
1
class FileCitation < OpenAI::Internal::Type::BaseModel
-
# @!attribute file_id
-
# The ID of the file.
-
#
-
# @return [String]
-
1
required :file_id, String
-
-
# @!attribute filename
-
# The filename of the file cited.
-
#
-
# @return [String]
-
1
required :filename, String
-
-
# @!attribute index
-
# The index of the file in the list of files.
-
#
-
# @return [Integer]
-
1
required :index, Integer
-
-
# @!attribute type
-
# The type of the file citation. Always `file_citation`.
-
#
-
# @return [Symbol, :file_citation]
-
1
required :type, const: :file_citation
-
-
# @!method initialize(file_id:, filename:, index:, type: :file_citation)
-
# A citation to a file.
-
#
-
# @param file_id [String] The ID of the file.
-
#
-
# @param filename [String] The filename of the file cited.
-
#
-
# @param index [Integer] The index of the file in the list of files.
-
#
-
# @param type [Symbol, :file_citation] The type of the file citation. Always `file_citation`.
-
end
-
-
1
class URLCitation < OpenAI::Internal::Type::BaseModel
-
# @!attribute end_index
-
# The index of the last character of the URL citation in the message.
-
#
-
# @return [Integer]
-
1
required :end_index, Integer
-
-
# @!attribute start_index
-
# The index of the first character of the URL citation in the message.
-
#
-
# @return [Integer]
-
1
required :start_index, Integer
-
-
# @!attribute title
-
# The title of the web resource.
-
#
-
# @return [String]
-
1
required :title, String
-
-
# @!attribute type
-
# The type of the URL citation. Always `url_citation`.
-
#
-
# @return [Symbol, :url_citation]
-
1
required :type, const: :url_citation
-
-
# @!attribute url
-
# The URL of the web resource.
-
#
-
# @return [String]
-
1
required :url, String
-
-
# @!method initialize(end_index:, start_index:, title:, url:, type: :url_citation)
-
# A citation for a web resource used to generate a model response.
-
#
-
# @param end_index [Integer] The index of the last character of the URL citation in the message.
-
#
-
# @param start_index [Integer] The index of the first character of the URL citation in the message.
-
#
-
# @param title [String] The title of the web resource.
-
#
-
# @param url [String] The URL of the web resource.
-
#
-
# @param type [Symbol, :url_citation] The type of the URL citation. Always `url_citation`.
-
end
-
-
1
class ContainerFileCitation < OpenAI::Internal::Type::BaseModel
-
# @!attribute container_id
-
# The ID of the container file.
-
#
-
# @return [String]
-
1
required :container_id, String
-
-
# @!attribute end_index
-
# The index of the last character of the container file citation in the message.
-
#
-
# @return [Integer]
-
1
required :end_index, Integer
-
-
# @!attribute file_id
-
# The ID of the file.
-
#
-
# @return [String]
-
1
required :file_id, String
-
-
# @!attribute filename
-
# The filename of the container file cited.
-
#
-
# @return [String]
-
1
required :filename, String
-
-
# @!attribute start_index
-
# The index of the first character of the container file citation in the message.
-
#
-
# @return [Integer]
-
1
required :start_index, Integer
-
-
# @!attribute type
-
# The type of the container file citation. Always `container_file_citation`.
-
#
-
# @return [Symbol, :container_file_citation]
-
1
required :type, const: :container_file_citation
-
-
# @!method initialize(container_id:, end_index:, file_id:, filename:, start_index:, type: :container_file_citation)
-
# A citation for a container file used to generate a model response.
-
#
-
# @param container_id [String] The ID of the container file.
-
#
-
# @param end_index [Integer] The index of the last character of the container file citation in the message.
-
#
-
# @param file_id [String] The ID of the file.
-
#
-
# @param filename [String] The filename of the container file cited.
-
#
-
# @param start_index [Integer] The index of the first character of the container file citation in the message.
-
#
-
# @param type [Symbol, :container_file_citation] The type of the container file citation. Always `container_file_citation`.
-
end
-
-
1
class FilePath < OpenAI::Internal::Type::BaseModel
-
# @!attribute file_id
-
# The ID of the file.
-
#
-
# @return [String]
-
1
required :file_id, String
-
-
# @!attribute index
-
# The index of the file in the list of files.
-
#
-
# @return [Integer]
-
1
required :index, Integer
-
-
# @!attribute type
-
# The type of the file path. Always `file_path`.
-
#
-
# @return [Symbol, :file_path]
-
1
required :type, const: :file_path
-
-
# @!method initialize(file_id:, index:, type: :file_path)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseOutputText::Annotation::FilePath} for more
-
# details.
-
#
-
# A path to a file.
-
#
-
# @param file_id [String] The ID of the file.
-
#
-
# @param index [Integer] The index of the file in the list of files.
-
#
-
# @param type [Symbol, :file_path] The type of the file path. Always `file_path`.
-
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Responses::ResponseOutputText::Annotation::FileCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::URLCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::ContainerFileCitation, OpenAI::Models::Responses::ResponseOutputText::Annotation::FilePath)]
-
end
-
-
1
class Logprob < OpenAI::Internal::Type::BaseModel
-
# @!attribute token
-
#
-
# @return [String]
-
1
required :token, String
-
-
# @!attribute bytes
-
#
-
# @return [Array<Integer>]
-
1
required :bytes, OpenAI::Internal::Type::ArrayOf[Integer]
-
-
# @!attribute logprob
-
#
-
# @return [Float]
-
1
required :logprob, Float
-
-
# @!attribute top_logprobs
-
#
-
# @return [Array<OpenAI::Models::Responses::ResponseOutputText::Logprob::TopLogprob>]
-
1
required :top_logprobs,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseOutputText::Logprob::TopLogprob]
-
}
-
-
# @!method initialize(token:, bytes:, logprob:, top_logprobs:)
-
# The log probability of a token.
-
#
-
# @param token [String]
-
# @param bytes [Array<Integer>]
-
# @param logprob [Float]
-
# @param top_logprobs [Array<OpenAI::Models::Responses::ResponseOutputText::Logprob::TopLogprob>]
-
-
1
class TopLogprob < OpenAI::Internal::Type::BaseModel
-
# @!attribute token
-
#
-
# @return [String]
-
1
required :token, String
-
-
# @!attribute bytes
-
#
-
# @return [Array<Integer>]
-
1
required :bytes, OpenAI::Internal::Type::ArrayOf[Integer]
-
-
# @!attribute logprob
-
#
-
# @return [Float]
-
1
required :logprob, Float
-
-
# @!method initialize(token:, bytes:, logprob:)
-
# The top log probability of a token.
-
#
-
# @param token [String]
-
# @param bytes [Array<Integer>]
-
# @param logprob [Float]
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
1
class ResponseOutputTextAnnotationAddedEvent < OpenAI::Internal::Type::BaseModel
-
# @!attribute annotation
-
# The annotation object being added. (See annotation schema for details.)
-
#
-
# @return [Object]
-
1
required :annotation, OpenAI::Internal::Type::Unknown
-
-
# @!attribute annotation_index
-
# The index of the annotation within the content part.
-
#
-
# @return [Integer]
-
1
required :annotation_index, Integer
-
-
# @!attribute content_index
-
# The index of the content part within the output item.
-
#
-
# @return [Integer]
-
1
required :content_index, Integer
-
-
# @!attribute item_id
-
# The unique identifier of the item to which the annotation is being added.
-
#
-
# @return [String]
-
1
required :item_id, String
-
-
# @!attribute output_index
-
# The index of the output item in the response's output array.
-
#
-
# @return [Integer]
-
1
required :output_index, Integer
-
-
# @!attribute sequence_number
-
# The sequence number of this event.
-
#
-
# @return [Integer]
-
1
required :sequence_number, Integer
-
-
# @!attribute type
-
# The type of the event. Always 'response.output_text.annotation.added'.
-
#
-
# @return [Symbol, :"response.output_text.annotation.added"]
-
1
required :type, const: :"response.output_text.annotation.added"
-
-
# @!method initialize(annotation:, annotation_index:, content_index:, item_id:, output_index:, sequence_number:, type: :"response.output_text.annotation.added")
-
# Emitted when an annotation is added to output text content.
-
#
-
# @param annotation [Object] The annotation object being added. (See annotation schema for details.)
-
#
-
# @param annotation_index [Integer] The index of the annotation within the content part.
-
#
-
# @param content_index [Integer] The index of the content part within the output item.
-
#
-
# @param item_id [String] The unique identifier of the item to which the annotation is being added.
-
#
-
# @param output_index [Integer] The index of the output item in the response's output array.
-
#
-
# @param sequence_number [Integer] The sequence number of this event.
-
#
-
# @param type [Symbol, :"response.output_text.annotation.added"] The type of the event. Always 'response.output_text.annotation.added'.
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
1
class ResponsePrompt < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique identifier of the prompt template to use.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute variables
-
# Optional map of values to substitute in for variables in your prompt. The
-
# substitution values can either be strings, or other Response input types like
-
# images or files.
-
#
-
# @return [Hash{Symbol=>String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile}, nil]
-
1
optional :variables,
-
-> { OpenAI::Internal::Type::HashOf[union: OpenAI::Responses::ResponsePrompt::Variable] },
-
nil?: true
-
-
# @!attribute version
-
# Optional version of the prompt template.
-
#
-
# @return [String, nil]
-
1
optional :version, String, nil?: true
-
-
# @!method initialize(id:, variables: nil, version: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponsePrompt} for more details.
-
#
-
# Reference to a prompt template and its variables.
-
# [Learn more](https://platform.openai.com/docs/guides/text?api-mode=responses#reusable-prompts).
-
#
-
# @param id [String] The unique identifier of the prompt template to use.
-
#
-
# @param variables [Hash{Symbol=>String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile}, nil] Optional map of values to substitute in for variables in your
-
#
-
# @param version [String, nil] Optional version of the prompt template.
-
-
# A text input to the model.
-
1
module Variable
-
1
extend OpenAI::Internal::Type::Union
-
-
1
variant String
-
-
# A text input to the model.
-
1
variant -> { OpenAI::Responses::ResponseInputText }
-
-
# An image input to the model. Learn about [image inputs](https://platform.openai.com/docs/guides/vision).
-
1
variant -> { OpenAI::Responses::ResponseInputImage }
-
-
# A file input to the model.
-
1
variant -> { OpenAI::Responses::ResponseInputFile }
-
-
# @!method self.variants
-
# @return [Array(String, OpenAI::Models::Responses::ResponseInputText, OpenAI::Models::Responses::ResponseInputImage, OpenAI::Models::Responses::ResponseInputFile)]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
1
class ResponseQueuedEvent < OpenAI::Internal::Type::BaseModel
-
# @!attribute response
-
# The full response object that is queued.
-
#
-
# @return [OpenAI::Models::Responses::Response]
-
1
required :response, -> { OpenAI::Responses::Response }
-
-
# @!attribute sequence_number
-
# The sequence number for this event.
-
#
-
# @return [Integer]
-
1
required :sequence_number, Integer
-
-
# @!attribute type
-
# The type of the event. Always 'response.queued'.
-
#
-
# @return [Symbol, :"response.queued"]
-
1
required :type, const: :"response.queued"
-
-
# @!method initialize(response:, sequence_number:, type: :"response.queued")
-
# Emitted when a response is queued and waiting to be processed.
-
#
-
# @param response [OpenAI::Models::Responses::Response] The full response object that is queued.
-
#
-
# @param sequence_number [Integer] The sequence number for this event.
-
#
-
# @param type [Symbol, :"response.queued"] The type of the event. Always 'response.queued'.
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
1
class ResponseReasoningItem < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique identifier of the reasoning content.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute summary
-
# Reasoning text contents.
-
#
-
# @return [Array<OpenAI::Models::Responses::ResponseReasoningItem::Summary>]
-
1
required :summary,
-
-> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseReasoningItem::Summary] }
-
-
# @!attribute type
-
# The type of the object. Always `reasoning`.
-
#
-
# @return [Symbol, :reasoning]
-
1
required :type, const: :reasoning
-
-
# @!attribute encrypted_content
-
# The encrypted content of the reasoning item - populated when a response is
-
# generated with `reasoning.encrypted_content` in the `include` parameter.
-
#
-
# @return [String, nil]
-
1
optional :encrypted_content, String, nil?: true
-
-
# @!attribute status
-
# The status of the item. One of `in_progress`, `completed`, or `incomplete`.
-
# Populated when items are returned via API.
-
#
-
# @return [Symbol, OpenAI::Models::Responses::ResponseReasoningItem::Status, nil]
-
1
optional :status, enum: -> { OpenAI::Responses::ResponseReasoningItem::Status }
-
-
# @!method initialize(id:, summary:, encrypted_content: nil, status: nil, type: :reasoning)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseReasoningItem} for more details.
-
#
-
# A description of the chain of thought used by a reasoning model while generating
-
# a response. Be sure to include these items in your `input` to the Responses API
-
# for subsequent turns of a conversation if you are manually
-
# [managing context](https://platform.openai.com/docs/guides/conversation-state).
-
#
-
# @param id [String] The unique identifier of the reasoning content.
-
#
-
# @param summary [Array<OpenAI::Models::Responses::ResponseReasoningItem::Summary>] Reasoning text contents.
-
#
-
# @param encrypted_content [String, nil] The encrypted content of the reasoning item - populated when a response is
-
#
-
# @param status [Symbol, OpenAI::Models::Responses::ResponseReasoningItem::Status] The status of the item. One of `in_progress`, `completed`, or
-
#
-
# @param type [Symbol, :reasoning] The type of the object. Always `reasoning`.
-
-
1
class Summary < OpenAI::Internal::Type::BaseModel
-
# @!attribute text
-
# A short summary of the reasoning used by the model when generating the response.
-
#
-
# @return [String]
-
1
required :text, String
-
-
# @!attribute type
-
# The type of the object. Always `summary_text`.
-
#
-
# @return [Symbol, :summary_text]
-
1
required :type, const: :summary_text
-
-
# @!method initialize(text:, type: :summary_text)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseReasoningItem::Summary} for more details.
-
#
-
# @param text [String] A short summary of the reasoning used by the model when generating
-
#
-
# @param type [Symbol, :summary_text] The type of the object. Always `summary_text`.
-
end
-
-
# The status of the item. One of `in_progress`, `completed`, or `incomplete`.
-
# Populated when items are returned via API.
-
#
-
# @see OpenAI::Models::Responses::ResponseReasoningItem#status
-
1
module Status
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
IN_PROGRESS = :in_progress
-
1
COMPLETED = :completed
-
1
INCOMPLETE = :incomplete
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
1
class ResponseReasoningSummaryDeltaEvent < OpenAI::Internal::Type::BaseModel
-
# @!attribute delta
-
# The partial update to the reasoning summary content.
-
#
-
# @return [Object]
-
1
required :delta, OpenAI::Internal::Type::Unknown
-
-
# @!attribute item_id
-
# The unique identifier of the item for which the reasoning summary is being
-
# updated.
-
#
-
# @return [String]
-
1
required :item_id, String
-
-
# @!attribute output_index
-
# The index of the output item in the response's output array.
-
#
-
# @return [Integer]
-
1
required :output_index, Integer
-
-
# @!attribute sequence_number
-
# The sequence number of this event.
-
#
-
# @return [Integer]
-
1
required :sequence_number, Integer
-
-
# @!attribute summary_index
-
# The index of the summary part within the output item.
-
#
-
# @return [Integer]
-
1
required :summary_index, Integer
-
-
# @!attribute type
-
# The type of the event. Always 'response.reasoning_summary.delta'.
-
#
-
# @return [Symbol, :"response.reasoning_summary.delta"]
-
1
required :type, const: :"response.reasoning_summary.delta"
-
-
# @!method initialize(delta:, item_id:, output_index:, sequence_number:, summary_index:, type: :"response.reasoning_summary.delta")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseReasoningSummaryDeltaEvent} for more
-
# details.
-
#
-
# Emitted when there is a delta (partial update) to the reasoning summary content.
-
#
-
# @param delta [Object] The partial update to the reasoning summary content.
-
#
-
# @param item_id [String] The unique identifier of the item for which the reasoning summary is being updat
-
#
-
# @param output_index [Integer] The index of the output item in the response's output array.
-
#
-
# @param sequence_number [Integer] The sequence number of this event.
-
#
-
# @param summary_index [Integer] The index of the summary part within the output item.
-
#
-
# @param type [Symbol, :"response.reasoning_summary.delta"] The type of the event. Always 'response.reasoning_summary.delta'.
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Responses
      # Streaming event emitted when the reasoning summary content is finalized
      # for an output item.
      class ResponseReasoningSummaryDoneEvent < OpenAI::Internal::Type::BaseModel
        # @!attribute item_id
        #   The unique identifier of the item for which the reasoning summary is finalized.
        #
        #   @return [String]
        required :item_id, String

        # @!attribute output_index
        #   The index of the output item in the response's output array.
        #
        #   @return [Integer]
        required :output_index, Integer

        # @!attribute sequence_number
        #   The sequence number of this event.
        #
        #   @return [Integer]
        required :sequence_number, Integer

        # @!attribute summary_index
        #   The index of the summary part within the output item.
        #
        #   @return [Integer]
        required :summary_index, Integer

        # @!attribute text
        #   The finalized reasoning summary text.
        #
        #   @return [String]
        required :text, String

        # @!attribute type
        #   The type of the event. Always 'response.reasoning_summary.done'.
        #
        #   @return [Symbol, :"response.reasoning_summary.done"]
        required :type, const: :"response.reasoning_summary.done"

        # @!method initialize(item_id:, output_index:, sequence_number:, summary_index:, text:, type: :"response.reasoning_summary.done")
        #   Emitted when the reasoning summary content is finalized for an item.
        #
        #   @param item_id [String] The unique identifier of the item for which the reasoning summary is finalized.
        #
        #   @param output_index [Integer] The index of the output item in the response's output array.
        #
        #   @param sequence_number [Integer] The sequence number of this event.
        #
        #   @param summary_index [Integer] The index of the summary part within the output item.
        #
        #   @param text [String] The finalized reasoning summary text.
        #
        #   @param type [Symbol, :"response.reasoning_summary.done"] The type of the event. Always 'response.reasoning_summary.done'.
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Responses
      # Streaming event emitted when a new reasoning summary part is added to an
      # output item.
      class ResponseReasoningSummaryPartAddedEvent < OpenAI::Internal::Type::BaseModel
        # @!attribute item_id
        #   The ID of the item this summary part is associated with.
        #
        #   @return [String]
        required :item_id, String

        # @!attribute output_index
        #   The index of the output item this summary part is associated with.
        #
        #   @return [Integer]
        required :output_index, Integer

        # @!attribute part
        #   The summary part that was added.
        #
        #   @return [OpenAI::Models::Responses::ResponseReasoningSummaryPartAddedEvent::Part]
        required :part, -> { OpenAI::Responses::ResponseReasoningSummaryPartAddedEvent::Part }

        # @!attribute sequence_number
        #   The sequence number of this event.
        #
        #   @return [Integer]
        required :sequence_number, Integer

        # @!attribute summary_index
        #   The index of the summary part within the reasoning summary.
        #
        #   @return [Integer]
        required :summary_index, Integer

        # @!attribute type
        #   The type of the event. Always `response.reasoning_summary_part.added`.
        #
        #   @return [Symbol, :"response.reasoning_summary_part.added"]
        required :type, const: :"response.reasoning_summary_part.added"

        # @!method initialize(item_id:, output_index:, part:, sequence_number:, summary_index:, type: :"response.reasoning_summary_part.added")
        #   Some parameter documentation has been truncated; see
        #   {OpenAI::Models::Responses::ResponseReasoningSummaryPartAddedEvent} for more
        #   details.
        #
        #   Emitted when a new reasoning summary part is added.
        #
        #   @param item_id [String] The ID of the item this summary part is associated with.
        #
        #   @param output_index [Integer] The index of the output item this summary part is associated with.
        #
        #   @param part [OpenAI::Models::Responses::ResponseReasoningSummaryPartAddedEvent::Part] The summary part that was added.
        #
        #   @param sequence_number [Integer] The sequence number of this event.
        #
        #   @param summary_index [Integer] The index of the summary part within the reasoning summary.
        #
        #   @param type [Symbol, :"response.reasoning_summary_part.added"] The type of the event. Always `response.reasoning_summary_part.added`.

        # @see OpenAI::Models::Responses::ResponseReasoningSummaryPartAddedEvent#part
        class Part < OpenAI::Internal::Type::BaseModel
          # @!attribute text
          #   The text of the summary part.
          #
          #   @return [String]
          required :text, String

          # @!attribute type
          #   The type of the summary part. Always `summary_text`.
          #
          #   @return [Symbol, :summary_text]
          required :type, const: :summary_text

          # @!method initialize(text:, type: :summary_text)
          #   The summary part that was added.
          #
          #   @param text [String] The text of the summary part.
          #
          #   @param type [Symbol, :summary_text] The type of the summary part. Always `summary_text`.
        end
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Responses
      # Streaming event emitted when a reasoning summary part is completed for an
      # output item.
      class ResponseReasoningSummaryPartDoneEvent < OpenAI::Internal::Type::BaseModel
        # @!attribute item_id
        #   The ID of the item this summary part is associated with.
        #
        #   @return [String]
        required :item_id, String

        # @!attribute output_index
        #   The index of the output item this summary part is associated with.
        #
        #   @return [Integer]
        required :output_index, Integer

        # @!attribute part
        #   The completed summary part.
        #
        #   @return [OpenAI::Models::Responses::ResponseReasoningSummaryPartDoneEvent::Part]
        required :part, -> { OpenAI::Responses::ResponseReasoningSummaryPartDoneEvent::Part }

        # @!attribute sequence_number
        #   The sequence number of this event.
        #
        #   @return [Integer]
        required :sequence_number, Integer

        # @!attribute summary_index
        #   The index of the summary part within the reasoning summary.
        #
        #   @return [Integer]
        required :summary_index, Integer

        # @!attribute type
        #   The type of the event. Always `response.reasoning_summary_part.done`.
        #
        #   @return [Symbol, :"response.reasoning_summary_part.done"]
        required :type, const: :"response.reasoning_summary_part.done"

        # @!method initialize(item_id:, output_index:, part:, sequence_number:, summary_index:, type: :"response.reasoning_summary_part.done")
        #   Some parameter documentation has been truncated; see
        #   {OpenAI::Models::Responses::ResponseReasoningSummaryPartDoneEvent} for more
        #   details.
        #
        #   Emitted when a reasoning summary part is completed.
        #
        #   @param item_id [String] The ID of the item this summary part is associated with.
        #
        #   @param output_index [Integer] The index of the output item this summary part is associated with.
        #
        #   @param part [OpenAI::Models::Responses::ResponseReasoningSummaryPartDoneEvent::Part] The completed summary part.
        #
        #   @param sequence_number [Integer] The sequence number of this event.
        #
        #   @param summary_index [Integer] The index of the summary part within the reasoning summary.
        #
        #   @param type [Symbol, :"response.reasoning_summary_part.done"] The type of the event. Always `response.reasoning_summary_part.done`.

        # @see OpenAI::Models::Responses::ResponseReasoningSummaryPartDoneEvent#part
        class Part < OpenAI::Internal::Type::BaseModel
          # @!attribute text
          #   The text of the summary part.
          #
          #   @return [String]
          required :text, String

          # @!attribute type
          #   The type of the summary part. Always `summary_text`.
          #
          #   @return [Symbol, :summary_text]
          required :type, const: :summary_text

          # @!method initialize(text:, type: :summary_text)
          #   The completed summary part.
          #
          #   @param text [String] The text of the summary part.
          #
          #   @param type [Symbol, :summary_text] The type of the summary part. Always `summary_text`.
        end
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Responses
      # Streaming event emitted when a text delta is added to a reasoning summary.
      class ResponseReasoningSummaryTextDeltaEvent < OpenAI::Internal::Type::BaseModel
        # @!attribute delta
        #   The text delta that was added to the summary.
        #
        #   @return [String]
        required :delta, String

        # @!attribute item_id
        #   The ID of the item this summary text delta is associated with.
        #
        #   @return [String]
        required :item_id, String

        # @!attribute output_index
        #   The index of the output item this summary text delta is associated with.
        #
        #   @return [Integer]
        required :output_index, Integer

        # @!attribute sequence_number
        #   The sequence number of this event.
        #
        #   @return [Integer]
        required :sequence_number, Integer

        # @!attribute summary_index
        #   The index of the summary part within the reasoning summary.
        #
        #   @return [Integer]
        required :summary_index, Integer

        # @!attribute type
        #   The type of the event. Always `response.reasoning_summary_text.delta`.
        #
        #   @return [Symbol, :"response.reasoning_summary_text.delta"]
        required :type, const: :"response.reasoning_summary_text.delta"

        # @!method initialize(delta:, item_id:, output_index:, sequence_number:, summary_index:, type: :"response.reasoning_summary_text.delta")
        #   Some parameter documentation has been truncated; see
        #   {OpenAI::Models::Responses::ResponseReasoningSummaryTextDeltaEvent} for more
        #   details.
        #
        #   Emitted when a delta is added to a reasoning summary text.
        #
        #   @param delta [String] The text delta that was added to the summary.
        #
        #   @param item_id [String] The ID of the item this summary text delta is associated with.
        #
        #   @param output_index [Integer] The index of the output item this summary text delta is associated with.
        #
        #   @param sequence_number [Integer] The sequence number of this event.
        #
        #   @param summary_index [Integer] The index of the summary part within the reasoning summary.
        #
        #   @param type [Symbol, :"response.reasoning_summary_text.delta"] The type of the event. Always `response.reasoning_summary_text.delta`.
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Responses
      # Streaming event emitted when a reasoning summary text is completed.
      class ResponseReasoningSummaryTextDoneEvent < OpenAI::Internal::Type::BaseModel
        # @!attribute item_id
        #   The ID of the item this summary text is associated with.
        #
        #   @return [String]
        required :item_id, String

        # @!attribute output_index
        #   The index of the output item this summary text is associated with.
        #
        #   @return [Integer]
        required :output_index, Integer

        # @!attribute sequence_number
        #   The sequence number of this event.
        #
        #   @return [Integer]
        required :sequence_number, Integer

        # @!attribute summary_index
        #   The index of the summary part within the reasoning summary.
        #
        #   @return [Integer]
        required :summary_index, Integer

        # @!attribute text
        #   The full text of the completed reasoning summary.
        #
        #   @return [String]
        required :text, String

        # @!attribute type
        #   The type of the event. Always `response.reasoning_summary_text.done`.
        #
        #   @return [Symbol, :"response.reasoning_summary_text.done"]
        required :type, const: :"response.reasoning_summary_text.done"

        # @!method initialize(item_id:, output_index:, sequence_number:, summary_index:, text:, type: :"response.reasoning_summary_text.done")
        #   Some parameter documentation has been truncated; see
        #   {OpenAI::Models::Responses::ResponseReasoningSummaryTextDoneEvent} for more
        #   details.
        #
        #   Emitted when a reasoning summary text is completed.
        #
        #   @param item_id [String] The ID of the item this summary text is associated with.
        #
        #   @param output_index [Integer] The index of the output item this summary text is associated with.
        #
        #   @param sequence_number [Integer] The sequence number of this event.
        #
        #   @param summary_index [Integer] The index of the summary part within the reasoning summary.
        #
        #   @param text [String] The full text of the completed reasoning summary.
        #
        #   @param type [Symbol, :"response.reasoning_summary_text.done"] The type of the event. Always `response.reasoning_summary_text.done`.
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Responses
      # Streaming event emitted when there is a partial refusal text.
      class ResponseRefusalDeltaEvent < OpenAI::Internal::Type::BaseModel
        # @!attribute content_index
        #   The index of the content part that the refusal text is added to.
        #
        #   @return [Integer]
        required :content_index, Integer

        # @!attribute delta
        #   The refusal text that is added.
        #
        #   @return [String]
        required :delta, String

        # @!attribute item_id
        #   The ID of the output item that the refusal text is added to.
        #
        #   @return [String]
        required :item_id, String

        # @!attribute output_index
        #   The index of the output item that the refusal text is added to.
        #
        #   @return [Integer]
        required :output_index, Integer

        # @!attribute sequence_number
        #   The sequence number of this event.
        #
        #   @return [Integer]
        required :sequence_number, Integer

        # @!attribute type
        #   The type of the event. Always `response.refusal.delta`.
        #
        #   @return [Symbol, :"response.refusal.delta"]
        required :type, const: :"response.refusal.delta"

        # @!method initialize(content_index:, delta:, item_id:, output_index:, sequence_number:, type: :"response.refusal.delta")
        #   Some parameter documentation has been truncated; see
        #   {OpenAI::Models::Responses::ResponseRefusalDeltaEvent} for more details.
        #
        #   Emitted when there is a partial refusal text.
        #
        #   @param content_index [Integer] The index of the content part that the refusal text is added to.
        #
        #   @param delta [String] The refusal text that is added.
        #
        #   @param item_id [String] The ID of the output item that the refusal text is added to.
        #
        #   @param output_index [Integer] The index of the output item that the refusal text is added to.
        #
        #   @param sequence_number [Integer] The sequence number of this event.
        #
        #   @param type [Symbol, :"response.refusal.delta"] The type of the event. Always `response.refusal.delta`.
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Responses
      # Streaming event emitted when refusal text is finalized.
      class ResponseRefusalDoneEvent < OpenAI::Internal::Type::BaseModel
        # @!attribute content_index
        #   The index of the content part that the refusal text is finalized.
        #
        #   @return [Integer]
        required :content_index, Integer

        # @!attribute item_id
        #   The ID of the output item that the refusal text is finalized.
        #
        #   @return [String]
        required :item_id, String

        # @!attribute output_index
        #   The index of the output item that the refusal text is finalized.
        #
        #   @return [Integer]
        required :output_index, Integer

        # @!attribute refusal
        #   The refusal text that is finalized.
        #
        #   @return [String]
        required :refusal, String

        # @!attribute sequence_number
        #   The sequence number of this event.
        #
        #   @return [Integer]
        required :sequence_number, Integer

        # @!attribute type
        #   The type of the event. Always `response.refusal.done`.
        #
        #   @return [Symbol, :"response.refusal.done"]
        required :type, const: :"response.refusal.done"

        # @!method initialize(content_index:, item_id:, output_index:, refusal:, sequence_number:, type: :"response.refusal.done")
        #   Some parameter documentation has been truncated; see
        #   {OpenAI::Models::Responses::ResponseRefusalDoneEvent} for more details.
        #
        #   Emitted when refusal text is finalized.
        #
        #   @param content_index [Integer] The index of the content part that the refusal text is finalized.
        #
        #   @param item_id [String] The ID of the output item that the refusal text is finalized.
        #
        #   @param output_index [Integer] The index of the output item that the refusal text is finalized.
        #
        #   @param refusal [String] The refusal text that is finalized.
        #
        #   @param sequence_number [Integer] The sequence number of this event.
        #
        #   @param type [Symbol, :"response.refusal.done"] The type of the event. Always `response.refusal.done`.
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Responses
      # Request parameters for retrieving a Response, in both blocking and
      # streaming forms.
      #
      # @see OpenAI::Resources::Responses#retrieve
      #
      # @see OpenAI::Resources::Responses#retrieve_streaming
      class ResponseRetrieveParams < OpenAI::Internal::Type::BaseModel
        extend OpenAI::Internal::Type::RequestParameters::Converter
        include OpenAI::Internal::Type::RequestParameters

        # @!attribute include
        #   Additional fields to include in the response. See the `include` parameter for
        #   Response creation above for more information.
        #
        #   @return [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>, nil]
        optional :include, -> { OpenAI::Internal::Type::ArrayOf[enum: OpenAI::Responses::ResponseIncludable] }

        # @!attribute starting_after
        #   The sequence number of the event after which to start streaming.
        #
        #   @return [Integer, nil]
        optional :starting_after, Integer

        # @!method initialize(include: nil, starting_after: nil, request_options: {})
        #   Some parameter documentation has been truncated; see
        #   {OpenAI::Models::Responses::ResponseRetrieveParams} for more details.
        #
        #   @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>] Additional fields to include in the response. See the `include`
        #
        #   @param starting_after [Integer] The sequence number of the event after which to start streaming.
        #
        #   @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Responses
      # The status of the response generation. One of `completed`, `failed`,
      # `in_progress`, `cancelled`, `queued`, or `incomplete`.
      module ResponseStatus
        extend OpenAI::Internal::Type::Enum

        COMPLETED = :completed
        FAILED = :failed
        IN_PROGRESS = :in_progress
        CANCELLED = :cancelled
        QUEUED = :queued
        INCOMPLETE = :incomplete

        # @!method self.values
        #   @return [Array<Symbol>]
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Responses
      # Discriminated union over every server-sent event type emitted while
      # streaming a Response; dispatch is on the `type` field of the payload.
      #
      # Emitted when there is a partial audio response.
      module ResponseStreamEvent
        extend OpenAI::Internal::Type::Union

        discriminator :type

        # Emitted when there is a partial audio response.
        variant :"response.audio.delta", -> { OpenAI::Responses::ResponseAudioDeltaEvent }

        # Emitted when the audio response is complete.
        variant :"response.audio.done", -> { OpenAI::Responses::ResponseAudioDoneEvent }

        # Emitted when there is a partial transcript of audio.
        variant :"response.audio.transcript.delta",
                -> { OpenAI::Responses::ResponseAudioTranscriptDeltaEvent }

        # Emitted when the full audio transcript is completed.
        variant :"response.audio.transcript.done", -> { OpenAI::Responses::ResponseAudioTranscriptDoneEvent }

        # Emitted when a partial code snippet is streamed by the code interpreter.
        variant :"response.code_interpreter_call_code.delta",
                -> { OpenAI::Responses::ResponseCodeInterpreterCallCodeDeltaEvent }

        # Emitted when the code snippet is finalized by the code interpreter.
        variant :"response.code_interpreter_call_code.done",
                -> { OpenAI::Responses::ResponseCodeInterpreterCallCodeDoneEvent }

        # Emitted when the code interpreter call is completed.
        variant :"response.code_interpreter_call.completed",
                -> { OpenAI::Responses::ResponseCodeInterpreterCallCompletedEvent }

        # Emitted when a code interpreter call is in progress.
        variant :"response.code_interpreter_call.in_progress",
                -> { OpenAI::Responses::ResponseCodeInterpreterCallInProgressEvent }

        # Emitted when the code interpreter is actively interpreting the code snippet.
        variant :"response.code_interpreter_call.interpreting",
                -> { OpenAI::Responses::ResponseCodeInterpreterCallInterpretingEvent }

        # Emitted when the model response is complete.
        variant :"response.completed", -> { OpenAI::Responses::ResponseCompletedEvent }

        # Emitted when a new content part is added.
        variant :"response.content_part.added", -> { OpenAI::Responses::ResponseContentPartAddedEvent }

        # Emitted when a content part is done.
        variant :"response.content_part.done", -> { OpenAI::Responses::ResponseContentPartDoneEvent }

        # An event that is emitted when a response is created.
        variant :"response.created", -> { OpenAI::Responses::ResponseCreatedEvent }

        # Emitted when an error occurs.
        variant :error, -> { OpenAI::Responses::ResponseErrorEvent }

        # Emitted when a file search call is completed (results found).
        variant :"response.file_search_call.completed",
                -> { OpenAI::Responses::ResponseFileSearchCallCompletedEvent }

        # Emitted when a file search call is initiated.
        variant :"response.file_search_call.in_progress",
                -> { OpenAI::Responses::ResponseFileSearchCallInProgressEvent }

        # Emitted when a file search is currently searching.
        variant :"response.file_search_call.searching",
                -> { OpenAI::Responses::ResponseFileSearchCallSearchingEvent }

        # Emitted when there is a partial function-call arguments delta.
        variant :"response.function_call_arguments.delta",
                -> { OpenAI::Responses::ResponseFunctionCallArgumentsDeltaEvent }

        # Emitted when function-call arguments are finalized.
        variant :"response.function_call_arguments.done",
                -> { OpenAI::Responses::ResponseFunctionCallArgumentsDoneEvent }

        # Emitted when the response is in progress.
        variant :"response.in_progress", -> { OpenAI::Responses::ResponseInProgressEvent }

        # An event that is emitted when a response fails.
        variant :"response.failed", -> { OpenAI::Responses::ResponseFailedEvent }

        # An event that is emitted when a response finishes as incomplete.
        variant :"response.incomplete", -> { OpenAI::Responses::ResponseIncompleteEvent }

        # Emitted when a new output item is added.
        variant :"response.output_item.added", -> { OpenAI::Responses::ResponseOutputItemAddedEvent }

        # Emitted when an output item is marked done.
        variant :"response.output_item.done", -> { OpenAI::Responses::ResponseOutputItemDoneEvent }

        # Emitted when a new reasoning summary part is added.
        variant :"response.reasoning_summary_part.added",
                -> { OpenAI::Responses::ResponseReasoningSummaryPartAddedEvent }

        # Emitted when a reasoning summary part is completed.
        variant :"response.reasoning_summary_part.done",
                -> { OpenAI::Responses::ResponseReasoningSummaryPartDoneEvent }

        # Emitted when a delta is added to a reasoning summary text.
        variant :"response.reasoning_summary_text.delta",
                -> { OpenAI::Responses::ResponseReasoningSummaryTextDeltaEvent }

        # Emitted when a reasoning summary text is completed.
        variant :"response.reasoning_summary_text.done",
                -> { OpenAI::Responses::ResponseReasoningSummaryTextDoneEvent }

        # Emitted when there is a partial refusal text.
        variant :"response.refusal.delta", -> { OpenAI::Responses::ResponseRefusalDeltaEvent }

        # Emitted when refusal text is finalized.
        variant :"response.refusal.done", -> { OpenAI::Responses::ResponseRefusalDoneEvent }

        # Emitted when there is an additional text delta.
        variant :"response.output_text.delta", -> { OpenAI::Responses::ResponseTextDeltaEvent }

        # Emitted when text content is finalized.
        variant :"response.output_text.done", -> { OpenAI::Responses::ResponseTextDoneEvent }

        # Emitted when a web search call is completed.
        variant :"response.web_search_call.completed",
                -> { OpenAI::Responses::ResponseWebSearchCallCompletedEvent }

        # Emitted when a web search call is initiated.
        variant :"response.web_search_call.in_progress",
                -> { OpenAI::Responses::ResponseWebSearchCallInProgressEvent }

        # Emitted when a web search call is executing.
        variant :"response.web_search_call.searching",
                -> { OpenAI::Responses::ResponseWebSearchCallSearchingEvent }

        # Emitted when an image generation tool call has completed and the final image is available.
        variant :"response.image_generation_call.completed",
                -> { OpenAI::Responses::ResponseImageGenCallCompletedEvent }

        # Emitted when an image generation tool call is actively generating an image (intermediate state).
        variant :"response.image_generation_call.generating",
                -> { OpenAI::Responses::ResponseImageGenCallGeneratingEvent }

        # Emitted when an image generation tool call is in progress.
        variant :"response.image_generation_call.in_progress",
                -> { OpenAI::Responses::ResponseImageGenCallInProgressEvent }

        # Emitted when a partial image is available during image generation streaming.
        variant :"response.image_generation_call.partial_image",
                -> { OpenAI::Responses::ResponseImageGenCallPartialImageEvent }

        # Emitted when there is a delta (partial update) to the arguments of an MCP tool call.
        variant :"response.mcp_call_arguments.delta",
                -> { OpenAI::Responses::ResponseMcpCallArgumentsDeltaEvent }

        # Emitted when the arguments for an MCP tool call are finalized.
        variant :"response.mcp_call_arguments.done",
                -> { OpenAI::Responses::ResponseMcpCallArgumentsDoneEvent }

        # Emitted when an MCP tool call has completed successfully.
        variant :"response.mcp_call.completed", -> { OpenAI::Responses::ResponseMcpCallCompletedEvent }

        # Emitted when an MCP tool call has failed.
        variant :"response.mcp_call.failed", -> { OpenAI::Responses::ResponseMcpCallFailedEvent }

        # Emitted when an MCP tool call is in progress.
        variant :"response.mcp_call.in_progress", -> { OpenAI::Responses::ResponseMcpCallInProgressEvent }

        # Emitted when the list of available MCP tools has been successfully retrieved.
        variant :"response.mcp_list_tools.completed",
                -> { OpenAI::Responses::ResponseMcpListToolsCompletedEvent }

        # Emitted when the attempt to list available MCP tools has failed.
        variant :"response.mcp_list_tools.failed", -> { OpenAI::Responses::ResponseMcpListToolsFailedEvent }

        # Emitted when the system is in the process of retrieving the list of available MCP tools.
        variant :"response.mcp_list_tools.in_progress",
                -> { OpenAI::Responses::ResponseMcpListToolsInProgressEvent }

        # Emitted when an annotation is added to output text content.
        variant :"response.output_text.annotation.added",
                -> { OpenAI::Responses::ResponseOutputTextAnnotationAddedEvent }

        # Emitted when a response is queued and waiting to be processed.
        variant :"response.queued", -> { OpenAI::Responses::ResponseQueuedEvent }

        # Emitted when there is a delta (partial update) to the reasoning summary content.
        variant :"response.reasoning_summary.delta",
                -> { OpenAI::Responses::ResponseReasoningSummaryDeltaEvent }

        # Emitted when the reasoning summary content is finalized for an item.
        variant :"response.reasoning_summary.done",
                -> { OpenAI::Responses::ResponseReasoningSummaryDoneEvent }

        # @!method self.variants
        #   @return [Array(OpenAI::Models::Responses::ResponseAudioDeltaEvent, OpenAI::Models::Responses::ResponseAudioDoneEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDeltaEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCompletedEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInProgressEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInterpretingEvent, OpenAI::Models::Responses::ResponseCompletedEvent, OpenAI::Models::Responses::ResponseContentPartAddedEvent, OpenAI::Models::Responses::ResponseContentPartDoneEvent, OpenAI::Models::Responses::ResponseCreatedEvent, OpenAI::Models::Responses::ResponseErrorEvent, OpenAI::Models::Responses::ResponseFileSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseFileSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseFileSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseInProgressEvent, OpenAI::Models::Responses::ResponseFailedEvent, OpenAI::Models::Responses::ResponseIncompleteEvent, OpenAI::Models::Responses::ResponseOutputItemAddedEvent, OpenAI::Models::Responses::ResponseOutputItemDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartAddedEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDoneEvent, OpenAI::Models::Responses::ResponseRefusalDeltaEvent, OpenAI::Models::Responses::ResponseRefusalDoneEvent, OpenAI::Models::Responses::ResponseTextDeltaEvent, OpenAI::Models::Responses::ResponseTextDoneEvent, OpenAI::Models::Responses::ResponseWebSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseWebSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseWebSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseImageGenCallCompletedEvent, OpenAI::Models::Responses::ResponseImageGenCallGeneratingEvent, OpenAI::Models::Responses::ResponseImageGenCallInProgressEvent, OpenAI::Models::Responses::ResponseImageGenCallPartialImageEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseMcpCallCompletedEvent, OpenAI::Models::Responses::ResponseMcpCallFailedEvent, OpenAI::Models::Responses::ResponseMcpCallInProgressEvent, OpenAI::Models::Responses::ResponseMcpListToolsCompletedEvent, OpenAI::Models::Responses::ResponseMcpListToolsFailedEvent, OpenAI::Models::Responses::ResponseMcpListToolsInProgressEvent, OpenAI::Models::Responses::ResponseOutputTextAnnotationAddedEvent, OpenAI::Models::Responses::ResponseQueuedEvent, OpenAI::Models::Responses::ResponseReasoningSummaryDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryDoneEvent)]
      end
    end
  end
end
-
# frozen_string_literal: true

module OpenAI
  module Models
    module Responses
      # Configuration options for a text response from the model (plain text or
      # structured JSON via `json_schema`).
      class ResponseTextConfig < OpenAI::Internal::Type::BaseModel
        # @!attribute format_
        #   An object specifying the format that the model must output.
        #
        #   Configuring `{ "type": "json_schema" }` enables Structured Outputs, which
        #   ensures the model will match your supplied JSON schema. Learn more in the
        #   [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
        #
        #   The default format is `{ "type": "text" }` with no additional options.
        #
        #   **Not recommended for gpt-4o and newer models:**
        #
        #   Setting to `{ "type": "json_object" }` enables the older JSON mode, which
        #   ensures the message the model generates is valid JSON. Using `json_schema` is
        #   preferred for models that support it.
        #
        #   @return [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject, nil]
        # NOTE: the attribute is named `format_` in Ruby (to avoid shadowing
        # Kernel#format) but serializes over the wire as `format` via api_name.
        optional :format_, union: -> { OpenAI::Responses::ResponseFormatTextConfig }, api_name: :format

        # @!method initialize(format_: nil)
        #   Some parameter documentation has been truncated; see
        #   {OpenAI::Models::Responses::ResponseTextConfig} for more details.
        #
        #   Configuration options for a text response from the model. Can be plain text or
        #   structured JSON data. Learn more:
        #
        #   - [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
        #   - [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
        #
        #   @param format_ [OpenAI::Models::ResponseFormatText, OpenAI::Models::Responses::ResponseFormatTextJSONSchemaConfig, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output.
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
1
class ResponseTextDeltaEvent < OpenAI::Internal::Type::BaseModel
-
# @!attribute content_index
-
# The index of the content part that the text delta was added to.
-
#
-
# @return [Integer]
-
1
required :content_index, Integer
-
-
# @!attribute delta
-
# The text delta that was added.
-
#
-
# @return [String]
-
1
required :delta, String
-
-
# @!attribute item_id
-
# The ID of the output item that the text delta was added to.
-
#
-
# @return [String]
-
1
required :item_id, String
-
-
# @!attribute logprobs
-
# The log probabilities of the tokens in the delta.
-
#
-
# @return [Array<OpenAI::Models::Responses::ResponseTextDeltaEvent::Logprob>]
-
1
required :logprobs,
-
-> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseTextDeltaEvent::Logprob] }
-
-
# @!attribute output_index
-
# The index of the output item that the text delta was added to.
-
#
-
# @return [Integer]
-
1
required :output_index, Integer
-
-
# @!attribute sequence_number
-
# The sequence number for this event.
-
#
-
# @return [Integer]
-
1
required :sequence_number, Integer
-
-
# @!attribute type
-
# The type of the event. Always `response.output_text.delta`.
-
#
-
# @return [Symbol, :"response.output_text.delta"]
-
1
required :type, const: :"response.output_text.delta"
-
-
# @!method initialize(content_index:, delta:, item_id:, logprobs:, output_index:, sequence_number:, type: :"response.output_text.delta")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseTextDeltaEvent} for more details.
-
#
-
# Emitted when there is an additional text delta.
-
#
-
# @param content_index [Integer] The index of the content part that the text delta was added to.
-
#
-
# @param delta [String] The text delta that was added.
-
#
-
# @param item_id [String] The ID of the output item that the text delta was added to.
-
#
-
# @param logprobs [Array<OpenAI::Models::Responses::ResponseTextDeltaEvent::Logprob>] The log probabilities of the tokens in the delta.
-
#
-
# @param output_index [Integer] The index of the output item that the text delta was added to.
-
#
-
# @param sequence_number [Integer] The sequence number for this event.
-
#
-
# @param type [Symbol, :"response.output_text.delta"] The type of the event. Always `response.output_text.delta`.
-
-
1
class Logprob < OpenAI::Internal::Type::BaseModel
-
# @!attribute token
-
# A possible text token.
-
#
-
# @return [String]
-
1
required :token, String
-
-
# @!attribute logprob
-
# The log probability of this token.
-
#
-
# @return [Float]
-
1
required :logprob, Float
-
-
# @!attribute top_logprobs
-
# The log probability of the top 20 most likely tokens.
-
#
-
# @return [Array<OpenAI::Models::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob>, nil]
-
1
optional :top_logprobs,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob]
-
}
-
-
# @!method initialize(token:, logprob:, top_logprobs: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseTextDeltaEvent::Logprob} for more details.
-
#
-
# A logprob is the logarithmic probability that the model assigns to producing a
-
# particular token at a given position in the sequence. Less-negative (higher)
-
# logprob values indicate greater model confidence in that token choice.
-
#
-
# @param token [String] A possible text token.
-
#
-
# @param logprob [Float] The log probability of this token.
-
#
-
# @param top_logprobs [Array<OpenAI::Models::Responses::ResponseTextDeltaEvent::Logprob::TopLogprob>] The log probability of the top 20 most likely tokens.
-
-
1
class TopLogprob < OpenAI::Internal::Type::BaseModel
-
# @!attribute token
-
# A possible text token.
-
#
-
# @return [String, nil]
-
1
optional :token, String
-
-
# @!attribute logprob
-
# The log probability of this token.
-
#
-
# @return [Float, nil]
-
1
optional :logprob, Float
-
-
# @!method initialize(token: nil, logprob: nil)
-
# @param token [String] A possible text token.
-
#
-
# @param logprob [Float] The log probability of this token.
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
1
class ResponseTextDoneEvent < OpenAI::Internal::Type::BaseModel
-
# @!attribute content_index
-
# The index of the content part that the text content is finalized.
-
#
-
# @return [Integer]
-
1
required :content_index, Integer
-
-
# @!attribute item_id
-
# The ID of the output item that the text content is finalized.
-
#
-
# @return [String]
-
1
required :item_id, String
-
-
# @!attribute logprobs
-
# The log probabilities of the tokens in the delta.
-
#
-
# @return [Array<OpenAI::Models::Responses::ResponseTextDoneEvent::Logprob>]
-
1
required :logprobs,
-
-> { OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseTextDoneEvent::Logprob] }
-
-
# @!attribute output_index
-
# The index of the output item that the text content is finalized.
-
#
-
# @return [Integer]
-
1
required :output_index, Integer
-
-
# @!attribute sequence_number
-
# The sequence number for this event.
-
#
-
# @return [Integer]
-
1
required :sequence_number, Integer
-
-
# @!attribute text
-
# The text content that is finalized.
-
#
-
# @return [String]
-
1
required :text, String
-
-
# @!attribute type
-
# The type of the event. Always `response.output_text.done`.
-
#
-
# @return [Symbol, :"response.output_text.done"]
-
1
required :type, const: :"response.output_text.done"
-
-
# @!method initialize(content_index:, item_id:, logprobs:, output_index:, sequence_number:, text:, type: :"response.output_text.done")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseTextDoneEvent} for more details.
-
#
-
# Emitted when text content is finalized.
-
#
-
# @param content_index [Integer] The index of the content part that the text content is finalized.
-
#
-
# @param item_id [String] The ID of the output item that the text content is finalized.
-
#
-
# @param logprobs [Array<OpenAI::Models::Responses::ResponseTextDoneEvent::Logprob>] The log probabilities of the tokens in the delta.
-
#
-
# @param output_index [Integer] The index of the output item that the text content is finalized.
-
#
-
# @param sequence_number [Integer] The sequence number for this event.
-
#
-
# @param text [String] The text content that is finalized.
-
#
-
# @param type [Symbol, :"response.output_text.done"] The type of the event. Always `response.output_text.done`.
-
-
1
class Logprob < OpenAI::Internal::Type::BaseModel
-
# @!attribute token
-
# A possible text token.
-
#
-
# @return [String]
-
1
required :token, String
-
-
# @!attribute logprob
-
# The log probability of this token.
-
#
-
# @return [Float]
-
1
required :logprob, Float
-
-
# @!attribute top_logprobs
-
# The log probability of the top 20 most likely tokens.
-
#
-
# @return [Array<OpenAI::Models::Responses::ResponseTextDoneEvent::Logprob::TopLogprob>, nil]
-
1
optional :top_logprobs,
-
-> {
-
OpenAI::Internal::Type::ArrayOf[OpenAI::Responses::ResponseTextDoneEvent::Logprob::TopLogprob]
-
}
-
-
# @!method initialize(token:, logprob:, top_logprobs: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseTextDoneEvent::Logprob} for more details.
-
#
-
# A logprob is the logarithmic probability that the model assigns to producing a
-
# particular token at a given position in the sequence. Less-negative (higher)
-
# logprob values indicate greater model confidence in that token choice.
-
#
-
# @param token [String] A possible text token.
-
#
-
# @param logprob [Float] The log probability of this token.
-
#
-
# @param top_logprobs [Array<OpenAI::Models::Responses::ResponseTextDoneEvent::Logprob::TopLogprob>] The log probability of the top 20 most likely tokens.
-
-
1
class TopLogprob < OpenAI::Internal::Type::BaseModel
-
# @!attribute token
-
# A possible text token.
-
#
-
# @return [String, nil]
-
1
optional :token, String
-
-
# @!attribute logprob
-
# The log probability of this token.
-
#
-
# @return [Float, nil]
-
1
optional :logprob, Float
-
-
# @!method initialize(token: nil, logprob: nil)
-
# @param token [String] A possible text token.
-
#
-
# @param logprob [Float] The log probability of this token.
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
1
class ResponseUsage < OpenAI::Internal::Type::BaseModel
-
# @!attribute input_tokens
-
# The number of input tokens.
-
#
-
# @return [Integer]
-
1
required :input_tokens, Integer
-
-
# @!attribute input_tokens_details
-
# A detailed breakdown of the input tokens.
-
#
-
# @return [OpenAI::Models::Responses::ResponseUsage::InputTokensDetails]
-
1
required :input_tokens_details, -> { OpenAI::Responses::ResponseUsage::InputTokensDetails }
-
-
# @!attribute output_tokens
-
# The number of output tokens.
-
#
-
# @return [Integer]
-
1
required :output_tokens, Integer
-
-
# @!attribute output_tokens_details
-
# A detailed breakdown of the output tokens.
-
#
-
# @return [OpenAI::Models::Responses::ResponseUsage::OutputTokensDetails]
-
1
required :output_tokens_details, -> { OpenAI::Responses::ResponseUsage::OutputTokensDetails }
-
-
# @!attribute total_tokens
-
# The total number of tokens used.
-
#
-
# @return [Integer]
-
1
required :total_tokens, Integer
-
-
# @!method initialize(input_tokens:, input_tokens_details:, output_tokens:, output_tokens_details:, total_tokens:)
-
# Represents token usage details including input tokens, output tokens, a
-
# breakdown of output tokens, and the total tokens used.
-
#
-
# @param input_tokens [Integer] The number of input tokens.
-
#
-
# @param input_tokens_details [OpenAI::Models::Responses::ResponseUsage::InputTokensDetails] A detailed breakdown of the input tokens.
-
#
-
# @param output_tokens [Integer] The number of output tokens.
-
#
-
# @param output_tokens_details [OpenAI::Models::Responses::ResponseUsage::OutputTokensDetails] A detailed breakdown of the output tokens.
-
#
-
# @param total_tokens [Integer] The total number of tokens used.
-
-
# @see OpenAI::Models::Responses::ResponseUsage#input_tokens_details
-
1
class InputTokensDetails < OpenAI::Internal::Type::BaseModel
-
# @!attribute cached_tokens
-
# The number of tokens that were retrieved from the cache.
-
# [More on prompt caching](https://platform.openai.com/docs/guides/prompt-caching).
-
#
-
# @return [Integer]
-
1
required :cached_tokens, Integer
-
-
# @!method initialize(cached_tokens:)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseUsage::InputTokensDetails} for more details.
-
#
-
# A detailed breakdown of the input tokens.
-
#
-
# @param cached_tokens [Integer] The number of tokens that were retrieved from the cache.
-
end
-
-
# @see OpenAI::Models::Responses::ResponseUsage#output_tokens_details
-
1
class OutputTokensDetails < OpenAI::Internal::Type::BaseModel
-
# @!attribute reasoning_tokens
-
# The number of reasoning tokens.
-
#
-
# @return [Integer]
-
1
required :reasoning_tokens, Integer
-
-
# @!method initialize(reasoning_tokens:)
-
# A detailed breakdown of the output tokens.
-
#
-
# @param reasoning_tokens [Integer] The number of reasoning tokens.
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
1
class ResponseWebSearchCallCompletedEvent < OpenAI::Internal::Type::BaseModel
-
# @!attribute item_id
-
# Unique ID for the output item associated with the web search call.
-
#
-
# @return [String]
-
1
required :item_id, String
-
-
# @!attribute output_index
-
# The index of the output item that the web search call is associated with.
-
#
-
# @return [Integer]
-
1
required :output_index, Integer
-
-
# @!attribute sequence_number
-
# The sequence number of the web search call being processed.
-
#
-
# @return [Integer]
-
1
required :sequence_number, Integer
-
-
# @!attribute type
-
# The type of the event. Always `response.web_search_call.completed`.
-
#
-
# @return [Symbol, :"response.web_search_call.completed"]
-
1
required :type, const: :"response.web_search_call.completed"
-
-
# @!method initialize(item_id:, output_index:, sequence_number:, type: :"response.web_search_call.completed")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseWebSearchCallCompletedEvent} for more
-
# details.
-
#
-
# Emitted when a web search call is completed.
-
#
-
# @param item_id [String] Unique ID for the output item associated with the web search call.
-
#
-
# @param output_index [Integer] The index of the output item that the web search call is associated with.
-
#
-
# @param sequence_number [Integer] The sequence number of the web search call being processed.
-
#
-
# @param type [Symbol, :"response.web_search_call.completed"] The type of the event. Always `response.web_search_call.completed`.
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
1
class ResponseWebSearchCallInProgressEvent < OpenAI::Internal::Type::BaseModel
-
# @!attribute item_id
-
# Unique ID for the output item associated with the web search call.
-
#
-
# @return [String]
-
1
required :item_id, String
-
-
# @!attribute output_index
-
# The index of the output item that the web search call is associated with.
-
#
-
# @return [Integer]
-
1
required :output_index, Integer
-
-
# @!attribute sequence_number
-
# The sequence number of the web search call being processed.
-
#
-
# @return [Integer]
-
1
required :sequence_number, Integer
-
-
# @!attribute type
-
# The type of the event. Always `response.web_search_call.in_progress`.
-
#
-
# @return [Symbol, :"response.web_search_call.in_progress"]
-
1
required :type, const: :"response.web_search_call.in_progress"
-
-
# @!method initialize(item_id:, output_index:, sequence_number:, type: :"response.web_search_call.in_progress")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseWebSearchCallInProgressEvent} for more
-
# details.
-
#
-
# Emitted when a web search call is initiated.
-
#
-
# @param item_id [String] Unique ID for the output item associated with the web search call.
-
#
-
# @param output_index [Integer] The index of the output item that the web search call is associated with.
-
#
-
# @param sequence_number [Integer] The sequence number of the web search call being processed.
-
#
-
# @param type [Symbol, :"response.web_search_call.in_progress"] The type of the event. Always `response.web_search_call.in_progress`.
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
1
class ResponseWebSearchCallSearchingEvent < OpenAI::Internal::Type::BaseModel
-
# @!attribute item_id
-
# Unique ID for the output item associated with the web search call.
-
#
-
# @return [String]
-
1
required :item_id, String
-
-
# @!attribute output_index
-
# The index of the output item that the web search call is associated with.
-
#
-
# @return [Integer]
-
1
required :output_index, Integer
-
-
# @!attribute sequence_number
-
# The sequence number of the web search call being processed.
-
#
-
# @return [Integer]
-
1
required :sequence_number, Integer
-
-
# @!attribute type
-
# The type of the event. Always `response.web_search_call.searching`.
-
#
-
# @return [Symbol, :"response.web_search_call.searching"]
-
1
required :type, const: :"response.web_search_call.searching"
-
-
# @!method initialize(item_id:, output_index:, sequence_number:, type: :"response.web_search_call.searching")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseWebSearchCallSearchingEvent} for more
-
# details.
-
#
-
# Emitted when a web search call is executing.
-
#
-
# @param item_id [String] Unique ID for the output item associated with the web search call.
-
#
-
# @param output_index [Integer] The index of the output item that the web search call is associated with.
-
#
-
# @param sequence_number [Integer] The sequence number of the web search call being processed.
-
#
-
# @param type [Symbol, :"response.web_search_call.searching"] The type of the event. Always `response.web_search_call.searching`.
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Responses
-
# A tool that can be used to generate a response.
-
1
module Tool
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
# Defines a function in your own code the model can choose to call. Learn more about [function calling](https://platform.openai.com/docs/guides/function-calling).
-
1
variant :function, -> { OpenAI::Responses::FunctionTool }
-
-
1
variant -> { OpenAI::StructuredOutput::JsonSchemaConverter }
-
-
# A tool that searches for relevant content from uploaded files. Learn more about the [file search tool](https://platform.openai.com/docs/guides/tools-file-search).
-
1
variant :file_search, -> { OpenAI::Responses::FileSearchTool }
-
-
# A tool that controls a virtual computer. Learn more about the [computer tool](https://platform.openai.com/docs/guides/tools-computer-use).
-
1
variant :computer_use_preview, -> { OpenAI::Responses::ComputerTool }
-
-
# Give the model access to additional tools via remote Model Context Protocol
-
# (MCP) servers. [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp).
-
1
variant :mcp, -> { OpenAI::Responses::Tool::Mcp }
-
-
# A tool that runs Python code to help generate a response to a prompt.
-
1
variant :code_interpreter, -> { OpenAI::Responses::Tool::CodeInterpreter }
-
-
# A tool that generates images using a model like `gpt-image-1`.
-
1
variant :image_generation, -> { OpenAI::Responses::Tool::ImageGeneration }
-
-
# A tool that allows the model to execute shell commands in a local environment.
-
1
variant :local_shell, -> { OpenAI::Responses::Tool::LocalShell }
-
-
# This tool searches the web for relevant results to use in a response. Learn more about the [web search tool](https://platform.openai.com/docs/guides/tools-web-search).
-
1
variant -> { OpenAI::Responses::WebSearchTool }
-
-
1
class Mcp < OpenAI::Internal::Type::BaseModel
-
# @!attribute server_label
-
# A label for this MCP server, used to identify it in tool calls.
-
#
-
# @return [String]
-
1
required :server_label, String
-
-
# @!attribute server_url
-
# The URL for the MCP server.
-
#
-
# @return [String]
-
1
required :server_url, String
-
-
# @!attribute type
-
# The type of the MCP tool. Always `mcp`.
-
#
-
# @return [Symbol, :mcp]
-
1
required :type, const: :mcp
-
-
# @!attribute allowed_tools
-
# List of allowed tool names or a filter object.
-
#
-
# @return [Array<String>, OpenAI::Models::Responses::Tool::Mcp::AllowedTools::McpAllowedToolsFilter, nil]
-
1
optional :allowed_tools, union: -> { OpenAI::Responses::Tool::Mcp::AllowedTools }, nil?: true
-
-
# @!attribute headers
-
# Optional HTTP headers to send to the MCP server. Use for authentication or other
-
# purposes.
-
#
-
# @return [Hash{Symbol=>String}, nil]
-
1
optional :headers, OpenAI::Internal::Type::HashOf[String], nil?: true
-
-
# @!attribute require_approval
-
# Specify which of the MCP server's tools require approval.
-
#
-
# @return [OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter, Symbol, OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalSetting, nil]
-
1
optional :require_approval, union: -> { OpenAI::Responses::Tool::Mcp::RequireApproval }, nil?: true
-
-
# @!attribute server_description
-
# Optional description of the MCP server, used to provide more context.
-
#
-
# @return [String, nil]
-
1
optional :server_description, String
-
-
# @!method initialize(server_label:, server_url:, allowed_tools: nil, headers: nil, require_approval: nil, server_description: nil, type: :mcp)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::Tool::Mcp} for more details.
-
#
-
# Give the model access to additional tools via remote Model Context Protocol
-
# (MCP) servers.
-
# [Learn more about MCP](https://platform.openai.com/docs/guides/tools-remote-mcp).
-
#
-
# @param server_label [String] A label for this MCP server, used to identify it in tool calls.
-
#
-
# @param server_url [String] The URL for the MCP server.
-
#
-
# @param allowed_tools [Array<String>, OpenAI::Models::Responses::Tool::Mcp::AllowedTools::McpAllowedToolsFilter, nil] List of allowed tool names or a filter object.
-
#
-
# @param headers [Hash{Symbol=>String}, nil] Optional HTTP headers to send to the MCP server. Use for authentication
-
#
-
# @param require_approval [OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter, Symbol, OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalSetting, nil] Specify which of the MCP server's tools require approval.
-
#
-
# @param server_description [String] Optional description of the MCP server, used to provide more context.
-
#
-
# @param type [Symbol, :mcp] The type of the MCP tool. Always `mcp`.
-
-
# List of allowed tool names or a filter object.
-
#
-
# @see OpenAI::Models::Responses::Tool::Mcp#allowed_tools
-
1
module AllowedTools
-
1
extend OpenAI::Internal::Type::Union
-
-
# A string array of allowed tool names
-
1
variant -> { OpenAI::Models::Responses::Tool::Mcp::AllowedTools::StringArray }
-
-
# A filter object to specify which tools are allowed.
-
1
variant -> { OpenAI::Responses::Tool::Mcp::AllowedTools::McpAllowedToolsFilter }
-
-
1
class McpAllowedToolsFilter < OpenAI::Internal::Type::BaseModel
-
# @!attribute tool_names
-
# List of allowed tool names.
-
#
-
# @return [Array<String>, nil]
-
1
optional :tool_names, OpenAI::Internal::Type::ArrayOf[String]
-
-
# @!method initialize(tool_names: nil)
-
# A filter object to specify which tools are allowed.
-
#
-
# @param tool_names [Array<String>] List of allowed tool names.
-
end
-
-
# @!method self.variants
-
# @return [Array(Array<String>, OpenAI::Models::Responses::Tool::Mcp::AllowedTools::McpAllowedToolsFilter)]
-
-
# @type [OpenAI::Internal::Type::Converter]
-
1
StringArray = OpenAI::Internal::Type::ArrayOf[String]
-
end
-
-
# Specify which of the MCP server's tools require approval.
-
#
-
# @see OpenAI::Models::Responses::Tool::Mcp#require_approval
-
1
module RequireApproval
-
1
extend OpenAI::Internal::Type::Union
-
-
1
variant -> { OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter }
-
-
# Specify a single approval policy for all tools. One of `always` or
-
# `never`. When set to `always`, all tools will require approval. When
-
# set to `never`, all tools will not require approval.
-
1
variant enum: -> { OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalSetting }
-
-
1
class McpToolApprovalFilter < OpenAI::Internal::Type::BaseModel
-
# @!attribute always
-
# A list of tools that always require approval.
-
#
-
# @return [OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Always, nil]
-
1
optional :always,
-
-> {
-
OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Always
-
}
-
-
# @!attribute never
-
# A list of tools that never require approval.
-
#
-
# @return [OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Never, nil]
-
1
optional :never,
-
-> {
-
OpenAI::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Never
-
}
-
-
# @!method initialize(always: nil, never: nil)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter}
-
# for more details.
-
#
-
# @param always [OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Always] A list of tools that always require approval.
-
#
-
# @param never [OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter::Never] A list of tools that never require approval.
-
-
# @see OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter#always
-
1
class Always < OpenAI::Internal::Type::BaseModel
-
# @!attribute tool_names
-
# List of tools that require approval.
-
#
-
# @return [Array<String>, nil]
-
1
optional :tool_names, OpenAI::Internal::Type::ArrayOf[String]
-
-
# @!method initialize(tool_names: nil)
-
# A list of tools that always require approval.
-
#
-
# @param tool_names [Array<String>] List of tools that require approval.
-
end
-
-
# @see OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter#never
-
1
class Never < OpenAI::Internal::Type::BaseModel
-
# @!attribute tool_names
-
# List of tools that do not require approval.
-
#
-
# @return [Array<String>, nil]
-
1
optional :tool_names, OpenAI::Internal::Type::ArrayOf[String]
-
-
# @!method initialize(tool_names: nil)
-
# A list of tools that never require approval.
-
#
-
# @param tool_names [Array<String>] List of tools that do not require approval.
-
end
-
end
-
-
# Specify a single approval policy for all tools. One of `always` or `never`. When
-
# set to `always`, all tools will require approval. When set to `never`, all tools
-
# will not require approval.
-
1
module McpToolApprovalSetting
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
ALWAYS = :always
-
1
NEVER = :never
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalFilter, Symbol, OpenAI::Models::Responses::Tool::Mcp::RequireApproval::McpToolApprovalSetting)]
-
end
-
end
-
-
1
class CodeInterpreter < OpenAI::Internal::Type::BaseModel
-
# @!attribute container
-
# The code interpreter container. Can be a container ID or an object that
-
# specifies uploaded file IDs to make available to your code.
-
#
-
# @return [String, OpenAI::Models::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto]
-
1
required :container, union: -> { OpenAI::Responses::Tool::CodeInterpreter::Container }
-
-
# @!attribute type
-
# The type of the code interpreter tool. Always `code_interpreter`.
-
#
-
# @return [Symbol, :code_interpreter]
-
1
required :type, const: :code_interpreter
-
-
# @!method initialize(container:, type: :code_interpreter)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::Tool::CodeInterpreter} for more details.
-
#
-
# A tool that runs Python code to help generate a response to a prompt.
-
#
-
# @param container [String, OpenAI::Models::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto] The code interpreter container. Can be a container ID or an object that
-
#
-
# @param type [Symbol, :code_interpreter] The type of the code interpreter tool. Always `code_interpreter`.
-
-
# The code interpreter container. Can be a container ID or an object that
-
# specifies uploaded file IDs to make available to your code.
-
#
-
# @see OpenAI::Models::Responses::Tool::CodeInterpreter#container
-
1
module Container
-
1
extend OpenAI::Internal::Type::Union
-
-
# The container ID.
-
1
variant String
-
-
# Configuration for a code interpreter container. Optionally specify the IDs
-
# of the files to run the code on.
-
1
variant -> { OpenAI::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto }
-
-
1
class CodeInterpreterToolAuto < OpenAI::Internal::Type::BaseModel
-
# @!attribute type
-
# Always `auto`.
-
#
-
# @return [Symbol, :auto]
-
1
required :type, const: :auto
-
-
# @!attribute file_ids
-
# An optional list of uploaded files to make available to your code.
-
#
-
# @return [Array<String>, nil]
-
1
optional :file_ids, OpenAI::Internal::Type::ArrayOf[String]
-
-
# @!method initialize(file_ids: nil, type: :auto)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto}
-
# for more details.
-
#
-
# Configuration for a code interpreter container. Optionally specify the IDs of
-
# the files to run the code on.
-
#
-
# @param file_ids [Array<String>] An optional list of uploaded files to make available to your code.
-
#
-
# @param type [Symbol, :auto] Always `auto`.
-
end
-
-
# @!method self.variants
-
# @return [Array(String, OpenAI::Models::Responses::Tool::CodeInterpreter::Container::CodeInterpreterToolAuto)]
-
end
-
end
-
-
1
class ImageGeneration < OpenAI::Internal::Type::BaseModel
-
# @!attribute type
-
# The type of the image generation tool. Always `image_generation`.
-
#
-
# @return [Symbol, :image_generation]
-
1
required :type, const: :image_generation
-
-
# @!attribute background
-
# Background type for the generated image. One of `transparent`, `opaque`, or
-
# `auto`. Default: `auto`.
-
#
-
# @return [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::Background, nil]
-
1
optional :background, enum: -> { OpenAI::Responses::Tool::ImageGeneration::Background }
-
-
# @!attribute input_fidelity
-
# Control how much effort the model will exert to match the style and features,
-
# especially facial features, of input images. This parameter is only supported
-
# for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
-
#
-
# @return [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::InputFidelity, nil]
-
1
optional :input_fidelity,
-
enum: -> {
-
OpenAI::Responses::Tool::ImageGeneration::InputFidelity
-
},
-
nil?: true
-
-
# @!attribute input_image_mask
-
# Optional mask for inpainting. Contains `image_url` (string, optional) and
-
# `file_id` (string, optional).
-
#
-
# @return [OpenAI::Models::Responses::Tool::ImageGeneration::InputImageMask, nil]
-
1
optional :input_image_mask, -> { OpenAI::Responses::Tool::ImageGeneration::InputImageMask }
-
-
# @!attribute model
-
# The image generation model to use. Default: `gpt-image-1`.
-
#
-
# @return [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::Model, nil]
-
1
optional :model, enum: -> { OpenAI::Responses::Tool::ImageGeneration::Model }
-
-
# @!attribute moderation
-
# Moderation level for the generated image. Default: `auto`.
-
#
-
# @return [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::Moderation, nil]
-
1
optional :moderation, enum: -> { OpenAI::Responses::Tool::ImageGeneration::Moderation }
-
-
# @!attribute output_compression
-
# Compression level for the output image. Default: 100.
-
#
-
# @return [Integer, nil]
-
1
optional :output_compression, Integer
-
-
# @!attribute output_format
-
# The output format of the generated image. One of `png`, `webp`, or `jpeg`.
-
# Default: `png`.
-
#
-
# @return [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::OutputFormat, nil]
-
1
optional :output_format, enum: -> { OpenAI::Responses::Tool::ImageGeneration::OutputFormat }
-
-
# @!attribute partial_images
-
# Number of partial images to generate in streaming mode, from 0 (default value)
-
# to 3.
-
#
-
# @return [Integer, nil]
-
1
optional :partial_images, Integer
-
-
# @!attribute quality
-
# The quality of the generated image. One of `low`, `medium`, `high`, or `auto`.
-
# Default: `auto`.
-
#
-
# @return [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::Quality, nil]
-
1
optional :quality, enum: -> { OpenAI::Responses::Tool::ImageGeneration::Quality }
-
-
# @!attribute size
-
# The size of the generated image. One of `1024x1024`, `1024x1536`, `1536x1024`,
-
# or `auto`. Default: `auto`.
-
#
-
# @return [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::Size, nil]
-
1
optional :size, enum: -> { OpenAI::Responses::Tool::ImageGeneration::Size }
-
-
# @!method initialize(background: nil, input_fidelity: nil, input_image_mask: nil, model: nil, moderation: nil, output_compression: nil, output_format: nil, partial_images: nil, quality: nil, size: nil, type: :image_generation)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::Tool::ImageGeneration} for more details.
-
#
-
# A tool that generates images using a model like `gpt-image-1`.
-
#
-
# @param background [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::Background] Background type for the generated image. One of `transparent`,
-
#
-
# @param input_fidelity [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::InputFidelity, nil] Control how much effort the model will exert to match the style and features,
-
#
-
# @param input_image_mask [OpenAI::Models::Responses::Tool::ImageGeneration::InputImageMask] Optional mask for inpainting. Contains `image_url`
-
#
-
# @param model [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::Model] The image generation model to use. Default: `gpt-image-1`.
-
#
-
# @param moderation [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::Moderation] Moderation level for the generated image. Default: `auto`.
-
#
-
# @param output_compression [Integer] Compression level for the output image. Default: 100.
-
#
-
# @param output_format [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::OutputFormat] The output format of the generated image. One of `png`, `webp`, or
-
#
-
# @param partial_images [Integer] Number of partial images to generate in streaming mode, from 0 (default value) t
-
#
-
# @param quality [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::Quality] The quality of the generated image. One of `low`, `medium`, `high`,
-
#
-
# @param size [Symbol, OpenAI::Models::Responses::Tool::ImageGeneration::Size] The size of the generated image. One of `1024x1024`, `1024x1536`,
-
#
-
# @param type [Symbol, :image_generation] The type of the image generation tool. Always `image_generation`.
-
-
# Background type for the generated image. One of `transparent`, `opaque`, or
# `auto`. Default: `auto`.
#
# @see OpenAI::Models::Responses::Tool::ImageGeneration#background
module Background
  extend OpenAI::Internal::Type::Enum

  # Symbol values mirror the strings accepted by the REST API.
  TRANSPARENT = :transparent
  OPAQUE = :opaque
  AUTO = :auto

  # @!method self.values
  #   @return [Array<Symbol>]
end
-
-
# Control how much effort the model will exert to match the style and features,
# especially facial features, of input images. This parameter is only supported
# for `gpt-image-1`. Supports `high` and `low`. Defaults to `low`.
#
# @see OpenAI::Models::Responses::Tool::ImageGeneration#input_fidelity
module InputFidelity
  extend OpenAI::Internal::Type::Enum

  HIGH = :high
  LOW = :low

  # @!method self.values
  #   @return [Array<Symbol>]
end
-
-
# Optional inpainting mask attached to the image-generation tool configuration.
# Exactly one of `file_id` or `image_url` is typically supplied; both are
# optional at the schema level.
#
# @see OpenAI::Models::Responses::Tool::ImageGeneration#input_image_mask
class InputImageMask < OpenAI::Internal::Type::BaseModel
  # @!attribute file_id
  #   File ID for the mask image.
  #
  #   @return [String, nil]
  optional :file_id, String

  # @!attribute image_url
  #   Base64-encoded mask image.
  #
  #   @return [String, nil]
  optional :image_url, String

  # @!method initialize(file_id: nil, image_url: nil)
  #   Some parameter documentations has been truncated, see
  #   {OpenAI::Models::Responses::Tool::ImageGeneration::InputImageMask} for more
  #   details.
  #
  #   Optional mask for inpainting. Contains `image_url` (string, optional) and
  #   `file_id` (string, optional).
  #
  #   @param file_id [String] File ID for the mask image.
  #
  #   @param image_url [String] Base64-encoded mask image.
end
-
-
# The image generation model to use. Default: `gpt-image-1`.
#
# @see OpenAI::Models::Responses::Tool::ImageGeneration#model
module Model
  extend OpenAI::Internal::Type::Enum

  # Only one model is currently accepted by this tool.
  GPT_IMAGE_1 = :"gpt-image-1"

  # @!method self.values
  #   @return [Array<Symbol>]
end
-
-
# Moderation level for the generated image. One of `auto` or `low`.
# Default: `auto`.
#
# @see OpenAI::Models::Responses::Tool::ImageGeneration#moderation
module Moderation
  extend OpenAI::Internal::Type::Enum

  AUTO = :auto
  LOW = :low

  # @!method self.values
  #   @return [Array<Symbol>]
end
-
-
# The output format of the generated image. One of `png`, `webp`, or `jpeg`.
# Default: `png`.
#
# @see OpenAI::Models::Responses::Tool::ImageGeneration#output_format
module OutputFormat
  extend OpenAI::Internal::Type::Enum

  PNG = :png
  WEBP = :webp
  JPEG = :jpeg

  # @!method self.values
  #   @return [Array<Symbol>]
end
-
-
# The quality of the generated image. One of `low`, `medium`, `high`, or `auto`.
# Default: `auto`.
#
# @see OpenAI::Models::Responses::Tool::ImageGeneration#quality
module Quality
  extend OpenAI::Internal::Type::Enum

  LOW = :low
  MEDIUM = :medium
  HIGH = :high
  AUTO = :auto

  # @!method self.values
  #   @return [Array<Symbol>]
end
-
-
# The size of the generated image. One of `1024x1024`, `1024x1536`, `1536x1024`,
# or `auto`. Default: `auto`.
#
# @see OpenAI::Models::Responses::Tool::ImageGeneration#size
module Size
  extend OpenAI::Internal::Type::Enum

  # Constant names carry a SIZE_ prefix because Ruby constants cannot
  # begin with a digit; the symbol values are the raw API strings.
  SIZE_1024X1024 = :"1024x1024"
  SIZE_1024X1536 = :"1024x1536"
  SIZE_1536X1024 = :"1536x1024"
  AUTO = :auto

  # @!method self.values
  #   @return [Array<Symbol>]
end
-
end
-
-
1
# A tool that allows the model to execute shell commands in a local environment.
# Carries no configuration beyond its discriminator `type`.
class LocalShell < OpenAI::Internal::Type::BaseModel
  # @!attribute type
  #   The type of the local shell tool. Always `local_shell`.
  #
  #   @return [Symbol, :local_shell]
  required :type, const: :local_shell

  # @!method initialize(type: :local_shell)
  #   A tool that allows the model to execute shell commands in a local environment.
  #
  #   @param type [Symbol, :local_shell] The type of the local shell tool. Always `local_shell`.
end
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool)]
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Responses
      # Use this option to force the model to call a specific function.
      class ToolChoiceFunction < OpenAI::Internal::Type::BaseModel
        # @!attribute name
        #   The name of the function to call.
        #
        #   @return [String]
        required :name, String

        # @!attribute type
        #   For function calling, the type is always `function`.
        #
        #   @return [Symbol, :function]
        required :type, const: :function

        # @!method initialize(name:, type: :function)
        #   Use this option to force the model to call a specific function.
        #
        #   @param name [String] The name of the function to call.
        #
        #   @param type [Symbol, :function] For function calling, the type is always `function`.
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Responses
      # Use this option to force the model to call a specific tool on a remote
      # MCP server, identified by its server label and (optionally) tool name.
      class ToolChoiceMcp < OpenAI::Internal::Type::BaseModel
        # @!attribute server_label
        #   The label of the MCP server to use.
        #
        #   @return [String]
        required :server_label, String

        # @!attribute type
        #   For MCP tools, the type is always `mcp`.
        #
        #   @return [Symbol, :mcp]
        required :type, const: :mcp

        # @!attribute name
        #   The name of the tool to call on the server. When omitted (nil), any
        #   tool on the labelled server may be called.
        #
        #   @return [String, nil]
        optional :name, String, nil?: true

        # @!method initialize(server_label:, name: nil, type: :mcp)
        #   Some parameter documentations has been truncated, see
        #   {OpenAI::Models::Responses::ToolChoiceMcp} for more details.
        #
        #   Use this option to force the model to call a specific tool on a remote MCP
        #   server.
        #
        #   @param server_label [String] The label of the MCP server to use.
        #
        #   @param name [String, nil] The name of the tool to call on the server.
        #
        #   @param type [Symbol, :mcp] For MCP tools, the type is always `mcp`.
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Responses
      # Controls which (if any) tool is called by the model.
      #
      # `none` means the model will not call any tool and instead generates a message.
      #
      # `auto` means the model can pick between generating a message or calling one or
      # more tools.
      #
      # `required` means the model must call one or more tools.
      module ToolChoiceOptions
        extend OpenAI::Internal::Type::Enum

        NONE = :none
        AUTO = :auto
        REQUIRED = :required

        # @!method self.values
        #   @return [Array<Symbol>]
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Responses
      # Indicates that the model should use a built-in (hosted) tool to
      # generate a response, selected by its `type` discriminator.
      class ToolChoiceTypes < OpenAI::Internal::Type::BaseModel
        # @!attribute type
        #   The type of hosted tool the model should use. Learn more about
        #   [built-in tools](https://platform.openai.com/docs/guides/tools).
        #
        #   Allowed values are:
        #
        #   - `file_search`
        #   - `web_search_preview`
        #   - `web_search_preview_2025_03_11`
        #   - `computer_use_preview`
        #   - `code_interpreter`
        #   - `image_generation`
        #
        #   @return [Symbol, OpenAI::Models::Responses::ToolChoiceTypes::Type]
        required :type, enum: -> { OpenAI::Responses::ToolChoiceTypes::Type }

        # @!method initialize(type:)
        #   Some parameter documentations has been truncated, see
        #   {OpenAI::Models::Responses::ToolChoiceTypes} for more details.
        #
        #   Indicates that the model should use a built-in tool to generate a response.
        #   [Learn more about built-in tools](https://platform.openai.com/docs/guides/tools).
        #
        #   @param type [Symbol, OpenAI::Models::Responses::ToolChoiceTypes::Type] The type of hosted tool the model should use. Learn more about

        # The type of hosted tool the model should use. Learn more about
        # [built-in tools](https://platform.openai.com/docs/guides/tools).
        #
        # Allowed values are:
        #
        # - `file_search`
        # - `web_search_preview`
        # - `web_search_preview_2025_03_11`
        # - `computer_use_preview`
        # - `code_interpreter`
        # - `image_generation`
        #
        # @see OpenAI::Models::Responses::ToolChoiceTypes#type
        module Type
          extend OpenAI::Internal::Type::Enum

          FILE_SEARCH = :file_search
          WEB_SEARCH_PREVIEW = :web_search_preview
          COMPUTER_USE_PREVIEW = :computer_use_preview
          WEB_SEARCH_PREVIEW_2025_03_11 = :web_search_preview_2025_03_11
          IMAGE_GENERATION = :image_generation
          CODE_INTERPRETER = :code_interpreter

          # @!method self.values
          #   @return [Array<Symbol>]
        end
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Responses
      # This tool searches the web for relevant results to use in a response.
      # Learn more about the
      # [web search tool](https://platform.openai.com/docs/guides/tools-web-search).
      class WebSearchTool < OpenAI::Internal::Type::BaseModel
        # @!attribute type
        #   The type of the web search tool. One of `web_search_preview` or
        #   `web_search_preview_2025_03_11`.
        #
        #   @return [Symbol, OpenAI::Models::Responses::WebSearchTool::Type]
        required :type, enum: -> { OpenAI::Responses::WebSearchTool::Type }

        # @!attribute search_context_size
        #   High level guidance for the amount of context window space to use for the
        #   search. One of `low`, `medium`, or `high`. `medium` is the default.
        #
        #   @return [Symbol, OpenAI::Models::Responses::WebSearchTool::SearchContextSize, nil]
        optional :search_context_size, enum: -> { OpenAI::Responses::WebSearchTool::SearchContextSize }

        # @!attribute user_location
        #   The user's location. Explicitly nilable so callers can clear it.
        #
        #   @return [OpenAI::Models::Responses::WebSearchTool::UserLocation, nil]
        optional :user_location, -> { OpenAI::Responses::WebSearchTool::UserLocation }, nil?: true

        # @!method initialize(type:, search_context_size: nil, user_location: nil)
        #   Some parameter documentations has been truncated, see
        #   {OpenAI::Models::Responses::WebSearchTool} for more details.
        #
        #   This tool searches the web for relevant results to use in a response. Learn more
        #   about the
        #   [web search tool](https://platform.openai.com/docs/guides/tools-web-search).
        #
        #   @param type [Symbol, OpenAI::Models::Responses::WebSearchTool::Type] The type of the web search tool. One of `web_search_preview` or `web_search_prev
        #
        #   @param search_context_size [Symbol, OpenAI::Models::Responses::WebSearchTool::SearchContextSize] High level guidance for the amount of context window space to use for the search
        #
        #   @param user_location [OpenAI::Models::Responses::WebSearchTool::UserLocation, nil] The user's location.

        # The type of the web search tool. One of `web_search_preview` or
        # `web_search_preview_2025_03_11`.
        #
        # @see OpenAI::Models::Responses::WebSearchTool#type
        module Type
          extend OpenAI::Internal::Type::Enum

          WEB_SEARCH_PREVIEW = :web_search_preview
          WEB_SEARCH_PREVIEW_2025_03_11 = :web_search_preview_2025_03_11

          # @!method self.values
          #   @return [Array<Symbol>]
        end

        # High level guidance for the amount of context window space to use for the
        # search. One of `low`, `medium`, or `high`. `medium` is the default.
        #
        # @see OpenAI::Models::Responses::WebSearchTool#search_context_size
        module SearchContextSize
          extend OpenAI::Internal::Type::Enum

          LOW = :low
          MEDIUM = :medium
          HIGH = :high

          # @!method self.values
          #   @return [Array<Symbol>]
        end

        # The user's approximate location, used to localize search results.
        # All fields are free-text and optional.
        #
        # @see OpenAI::Models::Responses::WebSearchTool#user_location
        class UserLocation < OpenAI::Internal::Type::BaseModel
          # @!attribute type
          #   The type of location approximation. Always `approximate`.
          #
          #   @return [Symbol, :approximate]
          required :type, const: :approximate

          # @!attribute city
          #   Free text input for the city of the user, e.g. `San Francisco`.
          #
          #   @return [String, nil]
          optional :city, String, nil?: true

          # @!attribute country
          #   The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of
          #   the user, e.g. `US`.
          #
          #   @return [String, nil]
          optional :country, String, nil?: true

          # @!attribute region
          #   Free text input for the region of the user, e.g. `California`.
          #
          #   @return [String, nil]
          optional :region, String, nil?: true

          # @!attribute timezone
          #   The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the
          #   user, e.g. `America/Los_Angeles`.
          #
          #   @return [String, nil]
          optional :timezone, String, nil?: true

          # @!method initialize(city: nil, country: nil, region: nil, timezone: nil, type: :approximate)
          #   Some parameter documentations has been truncated, see
          #   {OpenAI::Models::Responses::WebSearchTool::UserLocation} for more details.
          #
          #   The user's location.
          #
          #   @param city [String, nil] Free text input for the city of the user, e.g. `San Francisco`.
          #
          #   @param country [String, nil] The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of t
          #
          #   @param region [String, nil] Free text input for the region of the user, e.g. `California`.
          #
          #   @param timezone [String, nil] The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the user
          #
          #   @param type [Symbol, :approximate] The type of location approximation. Always `approximate`.
        end
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    # Union of every model identifier accepted by the Responses API:
    # an arbitrary string, any chat model, or a Responses-only model.
    module ResponsesModel
      extend OpenAI::Internal::Type::Union

      # Free-form model name (covers models not yet enumerated below).
      variant String

      variant enum: -> { OpenAI::ChatModel }

      variant enum: -> { OpenAI::ResponsesModel::ResponsesOnlyModel }

      # Models available exclusively through the Responses API
      # (not usable with Chat Completions).
      module ResponsesOnlyModel
        extend OpenAI::Internal::Type::Enum

        O1_PRO = :"o1-pro"
        O1_PRO_2025_03_19 = :"o1-pro-2025-03-19"
        O3_PRO = :"o3-pro"
        O3_PRO_2025_06_10 = :"o3-pro-2025-06-10"
        O3_DEEP_RESEARCH = :"o3-deep-research"
        O3_DEEP_RESEARCH_2025_06_26 = :"o3-deep-research-2025-06-26"
        O4_MINI_DEEP_RESEARCH = :"o4-mini-deep-research"
        O4_MINI_DEEP_RESEARCH_2025_06_26 = :"o4-mini-deep-research-2025-06-26"
        COMPUTER_USE_PREVIEW = :"computer-use-preview"
        COMPUTER_USE_PREVIEW_2025_03_11 = :"computer-use-preview-2025-03-11"

        # @!method self.values
        #   @return [Array<Symbol>]
      end

      # @!method self.variants
      #   @return [Array(String, Symbol, OpenAI::Models::ChatModel, Symbol, OpenAI::Models::ResponsesModel::ResponsesOnlyModel)]
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    # Token-based chunking configuration shared by the static chunking
    # strategy request and response objects.
    class StaticFileChunkingStrategy < OpenAI::Internal::Type::BaseModel
      # @!attribute chunk_overlap_tokens
      #   The number of tokens that overlap between chunks. The default value is `400`.
      #
      #   Note that the overlap must not exceed half of `max_chunk_size_tokens`.
      #
      #   @return [Integer]
      required :chunk_overlap_tokens, Integer

      # @!attribute max_chunk_size_tokens
      #   The maximum number of tokens in each chunk. The default value is `800`. The
      #   minimum value is `100` and the maximum value is `4096`.
      #
      #   @return [Integer]
      required :max_chunk_size_tokens, Integer

      # @!method initialize(chunk_overlap_tokens:, max_chunk_size_tokens:)
      #   Some parameter documentations has been truncated, see
      #   {OpenAI::Models::StaticFileChunkingStrategy} for more details.
      #
      #   @param chunk_overlap_tokens [Integer] The number of tokens that overlap between chunks. The default value is `400`.
      #
      #   @param max_chunk_size_tokens [Integer] The maximum number of tokens in each chunk. The default value is `800`. The mini
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    # Response-side wrapper that tags a {StaticFileChunkingStrategy} with the
    # `static` discriminator.
    class StaticFileChunkingStrategyObject < OpenAI::Internal::Type::BaseModel
      # @!attribute static
      #   The token-based chunking configuration.
      #
      #   @return [OpenAI::Models::StaticFileChunkingStrategy]
      required :static, -> { OpenAI::StaticFileChunkingStrategy }

      # @!attribute type
      #   Always `static`.
      #
      #   @return [Symbol, :static]
      required :type, const: :static

      # @!method initialize(static:, type: :static)
      #   @param static [OpenAI::Models::StaticFileChunkingStrategy]
      #
      #   @param type [Symbol, :static] Always `static`.
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    # Request-side counterpart of {StaticFileChunkingStrategyObject}:
    # customize your own chunking strategy by setting chunk size and overlap.
    class StaticFileChunkingStrategyObjectParam < OpenAI::Internal::Type::BaseModel
      # @!attribute static
      #   The token-based chunking configuration.
      #
      #   @return [OpenAI::Models::StaticFileChunkingStrategy]
      required :static, -> { OpenAI::StaticFileChunkingStrategy }

      # @!attribute type
      #   Always `static`.
      #
      #   @return [Symbol, :static]
      required :type, const: :static

      # @!method initialize(static:, type: :static)
      #   Customize your own chunking strategy by setting chunk size and chunk overlap.
      #
      #   @param static [OpenAI::Models::StaticFileChunkingStrategy]
      #
      #   @param type [Symbol, :static] Always `static`.
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    # The Upload object can accept byte chunks in the form of Parts.
    #
    # @see OpenAI::Resources::Uploads#create
    class Upload < OpenAI::Internal::Type::BaseModel
      # @!attribute id
      #   The Upload unique identifier, which can be referenced in API endpoints.
      #
      #   @return [String]
      required :id, String

      # @!attribute bytes
      #   The intended number of bytes to be uploaded.
      #
      #   @return [Integer]
      required :bytes, Integer

      # @!attribute created_at
      #   The Unix timestamp (in seconds) for when the Upload was created.
      #
      #   @return [Integer]
      required :created_at, Integer

      # @!attribute expires_at
      #   The Unix timestamp (in seconds) for when the Upload will expire.
      #
      #   @return [Integer]
      required :expires_at, Integer

      # @!attribute filename
      #   The name of the file to be uploaded.
      #
      #   @return [String]
      required :filename, String

      # @!attribute object
      #   The object type, which is always "upload".
      #
      #   @return [Symbol, :upload]
      required :object, const: :upload

      # @!attribute purpose
      #   The intended purpose of the file.
      #   [Please refer here](https://platform.openai.com/docs/api-reference/files/object#files/object-purpose)
      #   for acceptable values.
      #
      #   @return [String]
      required :purpose, String

      # @!attribute status
      #   The status of the Upload.
      #
      #   @return [Symbol, OpenAI::Models::Upload::Status]
      required :status, enum: -> { OpenAI::Upload::Status }

      # @!attribute file
      #   The `File` object represents a document that has been uploaded to OpenAI.
      #   Only present once the upload has been completed.
      #
      #   @return [OpenAI::Models::FileObject, nil]
      optional :file, -> { OpenAI::FileObject }, nil?: true

      # @!method initialize(id:, bytes:, created_at:, expires_at:, filename:, purpose:, status:, file: nil, object: :upload)
      #   Some parameter documentations has been truncated, see {OpenAI::Models::Upload}
      #   for more details.
      #
      #   The Upload object can accept byte chunks in the form of Parts.
      #
      #   @param id [String] The Upload unique identifier, which can be referenced in API endpoints.
      #
      #   @param bytes [Integer] The intended number of bytes to be uploaded.
      #
      #   @param created_at [Integer] The Unix timestamp (in seconds) for when the Upload was created.
      #
      #   @param expires_at [Integer] The Unix timestamp (in seconds) for when the Upload will expire.
      #
      #   @param filename [String] The name of the file to be uploaded.
      #
      #   @param purpose [String] The intended purpose of the file. [Please refer here](https://platform.openai.co
      #
      #   @param status [Symbol, OpenAI::Models::Upload::Status] The status of the Upload.
      #
      #   @param file [OpenAI::Models::FileObject, nil] The `File` object represents a document that has been uploaded to OpenAI.
      #
      #   @param object [Symbol, :upload] The object type, which is always "upload".

      # The status of the Upload.
      #
      # @see OpenAI::Models::Upload#status
      module Status
        extend OpenAI::Internal::Type::Enum

        PENDING = :pending
        COMPLETED = :completed
        CANCELLED = :cancelled
        EXPIRED = :expired

        # @!method self.values
        #   @return [Array<Symbol>]
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    # Parameters for cancelling an Upload; carries only request options.
    #
    # @see OpenAI::Resources::Uploads#cancel
    class UploadCancelParams < OpenAI::Internal::Type::BaseModel
      extend OpenAI::Internal::Type::RequestParameters::Converter
      include OpenAI::Internal::Type::RequestParameters

      # @!method initialize(request_options: {})
      #   @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    # Parameters for completing an Upload from its previously created Parts.
    #
    # @see OpenAI::Resources::Uploads#complete
    class UploadCompleteParams < OpenAI::Internal::Type::BaseModel
      extend OpenAI::Internal::Type::RequestParameters::Converter
      include OpenAI::Internal::Type::RequestParameters

      # @!attribute part_ids
      #   The ordered list of Part IDs.
      #
      #   @return [Array<String>]
      required :part_ids, OpenAI::Internal::Type::ArrayOf[String]

      # @!attribute md5
      #   The optional md5 checksum for the file contents to verify if the bytes uploaded
      #   matches what you expect.
      #
      #   @return [String, nil]
      optional :md5, String

      # @!method initialize(part_ids:, md5: nil, request_options: {})
      #   Some parameter documentations has been truncated, see
      #   {OpenAI::Models::UploadCompleteParams} for more details.
      #
      #   @param part_ids [Array<String>] The ordered list of Part IDs.
      #
      #   @param md5 [String] The optional md5 checksum for the file contents to verify if the bytes uploaded
      #
      #   @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    # Parameters for creating an Upload to which byte Parts can then be added.
    #
    # @see OpenAI::Resources::Uploads#create
    class UploadCreateParams < OpenAI::Internal::Type::BaseModel
      extend OpenAI::Internal::Type::RequestParameters::Converter
      include OpenAI::Internal::Type::RequestParameters

      # @!attribute bytes
      #   The number of bytes in the file you are uploading.
      #
      #   @return [Integer]
      required :bytes, Integer

      # @!attribute filename
      #   The name of the file to upload.
      #
      #   @return [String]
      required :filename, String

      # @!attribute mime_type
      #   The MIME type of the file.
      #
      #   This must fall within the supported MIME types for your file purpose. See the
      #   supported MIME types for assistants and vision.
      #
      #   @return [String]
      required :mime_type, String

      # @!attribute purpose
      #   The intended purpose of the uploaded file.
      #
      #   See the
      #   [documentation on File purposes](https://platform.openai.com/docs/api-reference/files/create#files-create-purpose).
      #
      #   @return [Symbol, OpenAI::Models::FilePurpose]
      required :purpose, enum: -> { OpenAI::FilePurpose }

      # @!method initialize(bytes:, filename:, mime_type:, purpose:, request_options: {})
      #   Some parameter documentations has been truncated, see
      #   {OpenAI::Models::UploadCreateParams} for more details.
      #
      #   @param bytes [Integer] The number of bytes in the file you are uploading.
      #
      #   @param filename [String] The name of the file to upload.
      #
      #   @param mime_type [String] The MIME type of the file.
      #
      #   @param purpose [Symbol, OpenAI::Models::FilePurpose] The intended purpose of the uploaded file.
      #
      #   @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Uploads
      # Parameters for adding a chunk of bytes (a Part) to an Upload.
      #
      # @see OpenAI::Resources::Uploads::Parts#create
      class PartCreateParams < OpenAI::Internal::Type::BaseModel
        extend OpenAI::Internal::Type::RequestParameters::Converter
        include OpenAI::Internal::Type::RequestParameters

        # @!attribute data
        #   The chunk of bytes for this Part. Accepts a path, an IO-like object,
        #   a raw string, or a pre-wrapped file part.
        #
        #   @return [Pathname, StringIO, IO, String, OpenAI::FilePart]
        required :data, OpenAI::Internal::Type::FileInput

        # @!method initialize(data:, request_options: {})
        #   Some parameter documentations has been truncated, see
        #   {OpenAI::Models::Uploads::PartCreateParams} for more details.
        #
        #   @param data [Pathname, StringIO, IO, String, OpenAI::FilePart] The chunk of bytes for this Part.
        #
        #   @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    module Uploads
      # The upload Part represents a chunk of bytes we can add to an Upload object.
      #
      # @see OpenAI::Resources::Uploads::Parts#create
      class UploadPart < OpenAI::Internal::Type::BaseModel
        # @!attribute id
        #   The upload Part unique identifier, which can be referenced in API endpoints.
        #
        #   @return [String]
        required :id, String

        # @!attribute created_at
        #   The Unix timestamp (in seconds) for when the Part was created.
        #
        #   @return [Integer]
        required :created_at, Integer

        # @!attribute object
        #   The object type, which is always `upload.part`.
        #
        #   @return [Symbol, :"upload.part"]
        required :object, const: :"upload.part"

        # @!attribute upload_id
        #   The ID of the Upload object that this Part was added to.
        #
        #   @return [String]
        required :upload_id, String

        # @!method initialize(id:, created_at:, upload_id:, object: :"upload.part")
        #   The upload Part represents a chunk of bytes we can add to an Upload object.
        #
        #   @param id [String] The upload Part unique identifier, which can be referenced in API endpoints.
        #
        #   @param created_at [Integer] The Unix timestamp (in seconds) for when the Part was created.
        #
        #   @param upload_id [String] The ID of the Upload object that this Part was added to.
        #
        #   @param object [Symbol, :"upload.part"] The object type, which is always `upload.part`.
      end
    end

    # Top-level convenience alias so callers can use OpenAI::Models::UploadPart.
    UploadPart = Uploads::UploadPart
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Models
    # A vector store is a collection of processed files that can be used by the
    # `file_search` tool.
    #
    # @see OpenAI::Resources::VectorStores#create
    class VectorStore < OpenAI::Internal::Type::BaseModel
      # @!attribute id
      #   The identifier, which can be referenced in API endpoints.
      #
      #   @return [String]
      required :id, String

      # @!attribute created_at
      #   The Unix timestamp (in seconds) for when the vector store was created.
      #
      #   @return [Integer]
      required :created_at, Integer

      # @!attribute file_counts
      #   Per-status file counts for this vector store.
      #
      #   @return [OpenAI::Models::VectorStore::FileCounts]
      required :file_counts, -> { OpenAI::VectorStore::FileCounts }

      # @!attribute last_active_at
      #   The Unix timestamp (in seconds) for when the vector store was last active.
      #
      #   @return [Integer, nil]
      required :last_active_at, Integer, nil?: true

      # @!attribute metadata
      #   Set of 16 key-value pairs that can be attached to an object. This can be useful
      #   for storing additional information about the object in a structured format, and
      #   querying for objects via API or the dashboard.
      #
      #   Keys are strings with a maximum length of 64 characters. Values are strings with
      #   a maximum length of 512 characters.
      #
      #   @return [Hash{Symbol=>String}, nil]
      required :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true

      # @!attribute name
      #   The name of the vector store.
      #
      #   @return [String]
      required :name, String

      # @!attribute object
      #   The object type, which is always `vector_store`.
      #
      #   @return [Symbol, :vector_store]
      required :object, const: :vector_store

      # @!attribute status
      #   The status of the vector store, which can be either `expired`, `in_progress`, or
      #   `completed`. A status of `completed` indicates that the vector store is ready
      #   for use.
      #
      #   @return [Symbol, OpenAI::Models::VectorStore::Status]
      required :status, enum: -> { OpenAI::VectorStore::Status }

      # @!attribute usage_bytes
      #   The total number of bytes used by the files in the vector store.
      #
      #   @return [Integer]
      required :usage_bytes, Integer

      # @!attribute expires_after
      #   The expiration policy for a vector store.
      #
      #   @return [OpenAI::Models::VectorStore::ExpiresAfter, nil]
      optional :expires_after, -> { OpenAI::VectorStore::ExpiresAfter }

      # @!attribute expires_at
      #   The Unix timestamp (in seconds) for when the vector store will expire.
      #
      #   @return [Integer, nil]
      optional :expires_at, Integer, nil?: true

      # @!method initialize(id:, created_at:, file_counts:, last_active_at:, metadata:, name:, status:, usage_bytes:, expires_after: nil, expires_at: nil, object: :vector_store)
      #   Some parameter documentations has been truncated, see
      #   {OpenAI::Models::VectorStore} for more details.
      #
      #   A vector store is a collection of processed files can be used by the
      #   `file_search` tool.
      #
      #   @param id [String] The identifier, which can be referenced in API endpoints.
      #
      #   @param created_at [Integer] The Unix timestamp (in seconds) for when the vector store was created.
      #
      #   @param file_counts [OpenAI::Models::VectorStore::FileCounts]
      #
      #   @param last_active_at [Integer, nil] The Unix timestamp (in seconds) for when the vector store was last active.
      #
      #   @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
      #
      #   @param name [String] The name of the vector store.
      #
      #   @param status [Symbol, OpenAI::Models::VectorStore::Status] The status of the vector store, which can be either `expired`, `in_progress`, or
      #
      #   @param usage_bytes [Integer] The total number of bytes used by the files in the vector store.
      #
      #   @param expires_after [OpenAI::Models::VectorStore::ExpiresAfter] The expiration policy for a vector store.
      #
      #   @param expires_at [Integer, nil] The Unix timestamp (in seconds) for when the vector store will expire.
      #
      #   @param object [Symbol, :vector_store] The object type, which is always `vector_store`.

      # Counts of files in each processing state for this vector store.
      #
      # @see OpenAI::Models::VectorStore#file_counts
      class FileCounts < OpenAI::Internal::Type::BaseModel
        # @!attribute cancelled
        #   The number of files that were cancelled.
        #
        #   @return [Integer]
        required :cancelled, Integer

        # @!attribute completed
        #   The number of files that have been successfully processed.
        #
        #   @return [Integer]
        required :completed, Integer

        # @!attribute failed
        #   The number of files that have failed to process.
        #
        #   @return [Integer]
        required :failed, Integer

        # @!attribute in_progress
        #   The number of files that are currently being processed.
        #
        #   @return [Integer]
        required :in_progress, Integer

        # @!attribute total
        #   The total number of files.
        #
        #   @return [Integer]
        required :total, Integer

        # @!method initialize(cancelled:, completed:, failed:, in_progress:, total:)
        #   @param cancelled [Integer] The number of files that were cancelled.
        #
        #   @param completed [Integer] The number of files that have been successfully processed.
        #
        #   @param failed [Integer] The number of files that have failed to process.
        #
        #   @param in_progress [Integer] The number of files that are currently being processed.
        #
        #   @param total [Integer] The total number of files.
      end

      # The status of the vector store, which can be either `expired`, `in_progress`, or
      # `completed`. A status of `completed` indicates that the vector store is ready
      # for use.
      #
      # @see OpenAI::Models::VectorStore#status
      module Status
        extend OpenAI::Internal::Type::Enum

        EXPIRED = :expired
        IN_PROGRESS = :in_progress
        COMPLETED = :completed

        # @!method self.values
        #   @return [Array<Symbol>]
      end

      # The expiration policy for a vector store.
      #
      # @see OpenAI::Models::VectorStore#expires_after
      class ExpiresAfter < OpenAI::Internal::Type::BaseModel
        # @!attribute anchor
        #   Anchor timestamp after which the expiration policy applies. Supported anchors:
        #   `last_active_at`.
        #
        #   @return [Symbol, :last_active_at]
        required :anchor, const: :last_active_at

        # @!attribute days
        #   The number of days after the anchor time that the vector store will expire.
        #
        #   @return [Integer]
        required :days, Integer

        # @!method initialize(days:, anchor: :last_active_at)
        #   Some parameter documentations has been truncated, see
        #   {OpenAI::Models::VectorStore::ExpiresAfter} for more details.
        #
        #   The expiration policy for a vector store.
        #
        #   @param days [Integer] The number of days after the anchor time that the vector store will expire.
        #
        #   @param anchor [Symbol, :last_active_at] Anchor timestamp after which the expiration policy applies. Supported anchors: `
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# Request parameters for creating a vector store.
#
# @see OpenAI::Resources::VectorStores#create
class VectorStoreCreateParams < OpenAI::Internal::Type::BaseModel
  extend OpenAI::Internal::Type::RequestParameters::Converter
  include OpenAI::Internal::Type::RequestParameters

  # @!attribute chunking_strategy
  # The chunking strategy used to chunk the file(s). If not set, will use the `auto`
  # strategy. Only applicable if `file_ids` is non-empty.
  #
  # @return [OpenAI::Models::AutoFileChunkingStrategyParam, OpenAI::Models::StaticFileChunkingStrategyObjectParam, nil]
  optional :chunking_strategy, union: -> { OpenAI::FileChunkingStrategyParam }

  # @!attribute expires_after
  # The expiration policy for a vector store.
  #
  # @return [OpenAI::Models::VectorStoreCreateParams::ExpiresAfter, nil]
  optional :expires_after, -> { OpenAI::VectorStoreCreateParams::ExpiresAfter }

  # @!attribute file_ids
  # A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
  # the vector store should use. Useful for tools like `file_search` that can access
  # files.
  #
  # @return [Array<String>, nil]
  optional :file_ids, OpenAI::Internal::Type::ArrayOf[String]

  # @!attribute metadata
  # Set of 16 key-value pairs that can be attached to an object. This can be useful
  # for storing additional information about the object in a structured format, and
  # querying for objects via API or the dashboard.
  #
  # Keys are strings with a maximum length of 64 characters. Values are strings with
  # a maximum length of 512 characters.
  #
  # @return [Hash{Symbol=>String}, nil]
  optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true

  # @!attribute name
  # The name of the vector store.
  #
  # @return [String, nil]
  optional :name, String

  # @!method initialize(chunking_strategy: nil, expires_after: nil, file_ids: nil, metadata: nil, name: nil, request_options: {})
  # Some parameter documentation has been truncated, see
  # {OpenAI::Models::VectorStoreCreateParams} for more details.
  #
  # @param chunking_strategy [OpenAI::Models::AutoFileChunkingStrategyParam, OpenAI::Models::StaticFileChunkingStrategyObjectParam] The chunking strategy used to chunk the file(s). If not set, will use the `auto`
  #
  # @param expires_after [OpenAI::Models::VectorStoreCreateParams::ExpiresAfter] The expiration policy for a vector store.
  #
  # @param file_ids [Array<String>] A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
  #
  # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
  #
  # @param name [String] The name of the vector store.
  #
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]

  class ExpiresAfter < OpenAI::Internal::Type::BaseModel
    # @!attribute anchor
    # Anchor timestamp after which the expiration policy applies. Supported anchors:
    # `last_active_at`.
    #
    # @return [Symbol, :last_active_at]
    required :anchor, const: :last_active_at

    # @!attribute days
    # The number of days after the anchor time that the vector store will expire.
    #
    # @return [Integer]
    required :days, Integer

    # @!method initialize(days:, anchor: :last_active_at)
    # Some parameter documentation has been truncated, see
    # {OpenAI::Models::VectorStoreCreateParams::ExpiresAfter} for more details.
    #
    # The expiration policy for a vector store.
    #
    # @param days [Integer] The number of days after the anchor time that the vector store will expire.
    #
    # @param anchor [Symbol, :last_active_at] Anchor timestamp after which the expiration policy applies. Supported anchors: `
  end
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# Request parameters for deleting a vector store. Carries no body fields of its
# own — only the shared request options.
#
# @see OpenAI::Resources::VectorStores#delete
class VectorStoreDeleteParams < OpenAI::Internal::Type::BaseModel
  extend OpenAI::Internal::Type::RequestParameters::Converter
  include OpenAI::Internal::Type::RequestParameters

  # @!method initialize(request_options: {})
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# Response payload confirming the deletion of a vector store.
#
# @see OpenAI::Resources::VectorStores#delete
class VectorStoreDeleted < OpenAI::Internal::Type::BaseModel
  # @!attribute id
  # The ID of the deleted vector store.
  #
  # @return [String]
  required :id, String

  # @!attribute deleted
  # Whether the vector store was deleted.
  #
  # @return [Boolean]
  required :deleted, OpenAI::Internal::Type::Boolean

  # @!attribute object
  # The object type, always `vector_store.deleted`.
  #
  # @return [Symbol, :"vector_store.deleted"]
  required :object, const: :"vector_store.deleted"

  # @!method initialize(id:, deleted:, object: :"vector_store.deleted")
  # @param id [String]
  # @param deleted [Boolean]
  # @param object [Symbol, :"vector_store.deleted"]
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# Request parameters for listing vector stores with cursor-based pagination.
#
# @see OpenAI::Resources::VectorStores#list
class VectorStoreListParams < OpenAI::Internal::Type::BaseModel
  extend OpenAI::Internal::Type::RequestParameters::Converter
  include OpenAI::Internal::Type::RequestParameters

  # @!attribute after
  # A cursor for use in pagination. `after` is an object ID that defines your place
  # in the list. For instance, if you make a list request and receive 100 objects,
  # ending with obj_foo, your subsequent call can include after=obj_foo in order to
  # fetch the next page of the list.
  #
  # @return [String, nil]
  optional :after, String

  # @!attribute before
  # A cursor for use in pagination. `before` is an object ID that defines your place
  # in the list. For instance, if you make a list request and receive 100 objects,
  # starting with obj_foo, your subsequent call can include before=obj_foo in order
  # to fetch the previous page of the list.
  #
  # @return [String, nil]
  optional :before, String

  # @!attribute limit
  # A limit on the number of objects to be returned. Limit can range between 1 and
  # 100, and the default is 20.
  #
  # @return [Integer, nil]
  optional :limit, Integer

  # @!attribute order
  # Sort order by the `created_at` timestamp of the objects. `asc` for ascending
  # order and `desc` for descending order.
  #
  # @return [Symbol, OpenAI::Models::VectorStoreListParams::Order, nil]
  optional :order, enum: -> { OpenAI::VectorStoreListParams::Order }

  # @!method initialize(after: nil, before: nil, limit: nil, order: nil, request_options: {})
  # Some parameter documentation has been truncated, see
  # {OpenAI::Models::VectorStoreListParams} for more details.
  #
  # @param after [String] A cursor for use in pagination. `after` is an object ID that defines your place
  #
  # @param before [String] A cursor for use in pagination. `before` is an object ID that defines your place
  #
  # @param limit [Integer] A limit on the number of objects to be returned. Limit can range between 1 and 1
  #
  # @param order [Symbol, OpenAI::Models::VectorStoreListParams::Order] Sort order by the `created_at` timestamp of the objects. `asc` for ascending ord
  #
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]

  # Sort order by the `created_at` timestamp of the objects. `asc` for ascending
  # order and `desc` for descending order.
  module Order
    extend OpenAI::Internal::Type::Enum

    ASC = :asc
    DESC = :desc

    # @!method self.values
    # @return [Array<Symbol>]
  end
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# Request parameters for retrieving a vector store. Carries no body fields of
# its own — only the shared request options.
#
# @see OpenAI::Resources::VectorStores#retrieve
class VectorStoreRetrieveParams < OpenAI::Internal::Type::BaseModel
  extend OpenAI::Internal::Type::RequestParameters::Converter
  include OpenAI::Internal::Type::RequestParameters

  # @!method initialize(request_options: {})
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# Request parameters for searching a vector store.
#
# @see OpenAI::Resources::VectorStores#search
class VectorStoreSearchParams < OpenAI::Internal::Type::BaseModel
  extend OpenAI::Internal::Type::RequestParameters::Converter
  include OpenAI::Internal::Type::RequestParameters

  # @!attribute query
  # A query string for a search
  #
  # @return [String, Array<String>]
  required :query, union: -> { OpenAI::VectorStoreSearchParams::Query }

  # @!attribute filters
  # A filter to apply based on file attributes.
  #
  # @return [OpenAI::Models::ComparisonFilter, OpenAI::Models::CompoundFilter, nil]
  optional :filters, union: -> { OpenAI::VectorStoreSearchParams::Filters }

  # @!attribute max_num_results
  # The maximum number of results to return. This number should be between 1 and 50
  # inclusive.
  #
  # @return [Integer, nil]
  optional :max_num_results, Integer

  # @!attribute ranking_options
  # Ranking options for search.
  #
  # @return [OpenAI::Models::VectorStoreSearchParams::RankingOptions, nil]
  optional :ranking_options, -> { OpenAI::VectorStoreSearchParams::RankingOptions }

  # @!attribute rewrite_query
  # Whether to rewrite the natural language query for vector search.
  #
  # @return [Boolean, nil]
  optional :rewrite_query, OpenAI::Internal::Type::Boolean

  # @!method initialize(query:, filters: nil, max_num_results: nil, ranking_options: nil, rewrite_query: nil, request_options: {})
  # Some parameter documentation has been truncated, see
  # {OpenAI::Models::VectorStoreSearchParams} for more details.
  #
  # @param query [String, Array<String>] A query string for a search
  #
  # @param filters [OpenAI::Models::ComparisonFilter, OpenAI::Models::CompoundFilter] A filter to apply based on file attributes.
  #
  # @param max_num_results [Integer] The maximum number of results to return. This number should be between 1 and 50
  #
  # @param ranking_options [OpenAI::Models::VectorStoreSearchParams::RankingOptions] Ranking options for search.
  #
  # @param rewrite_query [Boolean] Whether to rewrite the natural language query for vector search.
  #
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]

  # A query string for a search. Accepts either a single string or an array of
  # strings.
  module Query
    extend OpenAI::Internal::Type::Union

    variant String

    variant -> { OpenAI::Models::VectorStoreSearchParams::Query::StringArray }

    # @!method self.variants
    # @return [Array(String, Array<String>)]

    # @type [OpenAI::Internal::Type::Converter]
    StringArray = OpenAI::Internal::Type::ArrayOf[String]
  end

  # A filter to apply based on file attributes.
  module Filters
    extend OpenAI::Internal::Type::Union

    # A filter used to compare a specified attribute key to a given value using a defined comparison operation.
    variant -> { OpenAI::ComparisonFilter }

    # Combine multiple filters using `and` or `or`.
    variant -> { OpenAI::CompoundFilter }

    # @!method self.variants
    # @return [Array(OpenAI::Models::ComparisonFilter, OpenAI::Models::CompoundFilter)]
  end

  class RankingOptions < OpenAI::Internal::Type::BaseModel
    # @!attribute ranker
    # The ranker to use for the search.
    #
    # @return [Symbol, OpenAI::Models::VectorStoreSearchParams::RankingOptions::Ranker, nil]
    optional :ranker, enum: -> { OpenAI::VectorStoreSearchParams::RankingOptions::Ranker }

    # @!attribute score_threshold
    # Results scoring below this threshold are excluded — presumably; TODO confirm
    # against the API reference.
    #
    # @return [Float, nil]
    optional :score_threshold, Float

    # @!method initialize(ranker: nil, score_threshold: nil)
    # Ranking options for search.
    #
    # @param ranker [Symbol, OpenAI::Models::VectorStoreSearchParams::RankingOptions::Ranker]
    # @param score_threshold [Float]

    # @see OpenAI::Models::VectorStoreSearchParams::RankingOptions#ranker
    module Ranker
      extend OpenAI::Internal::Type::Enum

      AUTO = :auto
      DEFAULT_2024_11_15 = :"default-2024-11-15"

      # @!method self.values
      # @return [Array<Symbol>]
    end
  end
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# A single search result returned from a vector store search.
#
# @see OpenAI::Resources::VectorStores#search
class VectorStoreSearchResponse < OpenAI::Internal::Type::BaseModel
  # @!attribute attributes
  # Set of 16 key-value pairs that can be attached to an object. This can be useful
  # for storing additional information about the object in a structured format, and
  # querying for objects via API or the dashboard. Keys are strings with a maximum
  # length of 64 characters. Values are strings with a maximum length of 512
  # characters, booleans, or numbers.
  #
  # @return [Hash{Symbol=>String, Float, Boolean}, nil]
  required :attributes,
           -> { OpenAI::Internal::Type::HashOf[union: OpenAI::Models::VectorStoreSearchResponse::Attribute] },
           nil?: true

  # @!attribute content
  # Content chunks from the file.
  #
  # @return [Array<OpenAI::Models::VectorStoreSearchResponse::Content>]
  required :content,
           -> { OpenAI::Internal::Type::ArrayOf[OpenAI::Models::VectorStoreSearchResponse::Content] }

  # @!attribute file_id
  # The ID of the vector store file.
  #
  # @return [String]
  required :file_id, String

  # @!attribute filename
  # The name of the vector store file.
  #
  # @return [String]
  required :filename, String

  # @!attribute score
  # The similarity score for the result.
  #
  # @return [Float]
  required :score, Float

  # @!method initialize(attributes:, content:, file_id:, filename:, score:)
  # Some parameter documentation has been truncated, see
  # {OpenAI::Models::VectorStoreSearchResponse} for more details.
  #
  # @param attributes [Hash{Symbol=>String, Float, Boolean}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
  #
  # @param content [Array<OpenAI::Models::VectorStoreSearchResponse::Content>] Content chunks from the file.
  #
  # @param file_id [String] The ID of the vector store file.
  #
  # @param filename [String] The name of the vector store file.
  #
  # @param score [Float] The similarity score for the result.

  # The permitted value types for an entry in `attributes`.
  module Attribute
    extend OpenAI::Internal::Type::Union

    variant String

    variant Float

    variant OpenAI::Internal::Type::Boolean

    # @!method self.variants
    # @return [Array(String, Float, Boolean)]
  end

  class Content < OpenAI::Internal::Type::BaseModel
    # @!attribute text
    # The text content returned from search.
    #
    # @return [String]
    required :text, String

    # @!attribute type
    # The type of content.
    #
    # @return [Symbol, OpenAI::Models::VectorStoreSearchResponse::Content::Type]
    required :type, enum: -> { OpenAI::Models::VectorStoreSearchResponse::Content::Type }

    # @!method initialize(text:, type:)
    # @param text [String] The text content returned from search.
    #
    # @param type [Symbol, OpenAI::Models::VectorStoreSearchResponse::Content::Type] The type of content.

    # The type of content.
    #
    # @see OpenAI::Models::VectorStoreSearchResponse::Content#type
    module Type
      extend OpenAI::Internal::Type::Enum

      TEXT = :text

      # @!method self.values
      # @return [Array<Symbol>]
    end
  end
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
# Request parameters for updating a vector store. All attributes are optional;
# `expires_after`, `metadata`, and `name` also accept an explicit `nil` to clear
# the value (`nil?: true`).
#
# @see OpenAI::Resources::VectorStores#update
class VectorStoreUpdateParams < OpenAI::Internal::Type::BaseModel
  extend OpenAI::Internal::Type::RequestParameters::Converter
  include OpenAI::Internal::Type::RequestParameters

  # @!attribute expires_after
  # The expiration policy for a vector store.
  #
  # @return [OpenAI::Models::VectorStoreUpdateParams::ExpiresAfter, nil]
  optional :expires_after, -> { OpenAI::VectorStoreUpdateParams::ExpiresAfter }, nil?: true

  # @!attribute metadata
  # Set of 16 key-value pairs that can be attached to an object. This can be useful
  # for storing additional information about the object in a structured format, and
  # querying for objects via API or the dashboard.
  #
  # Keys are strings with a maximum length of 64 characters. Values are strings with
  # a maximum length of 512 characters.
  #
  # @return [Hash{Symbol=>String}, nil]
  optional :metadata, OpenAI::Internal::Type::HashOf[String], nil?: true

  # @!attribute name
  # The name of the vector store.
  #
  # @return [String, nil]
  optional :name, String, nil?: true

  # @!method initialize(expires_after: nil, metadata: nil, name: nil, request_options: {})
  # Some parameter documentation has been truncated, see
  # {OpenAI::Models::VectorStoreUpdateParams} for more details.
  #
  # @param expires_after [OpenAI::Models::VectorStoreUpdateParams::ExpiresAfter, nil] The expiration policy for a vector store.
  #
  # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
  #
  # @param name [String, nil] The name of the vector store.
  #
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]

  class ExpiresAfter < OpenAI::Internal::Type::BaseModel
    # @!attribute anchor
    # Anchor timestamp after which the expiration policy applies. Supported anchors:
    # `last_active_at`.
    #
    # @return [Symbol, :last_active_at]
    required :anchor, const: :last_active_at

    # @!attribute days
    # The number of days after the anchor time that the vector store will expire.
    #
    # @return [Integer]
    required :days, Integer

    # @!method initialize(days:, anchor: :last_active_at)
    # Some parameter documentation has been truncated, see
    # {OpenAI::Models::VectorStoreUpdateParams::ExpiresAfter} for more details.
    #
    # The expiration policy for a vector store.
    #
    # @param days [Integer] The number of days after the anchor time that the vector store will expire.
    #
    # @param anchor [Symbol, :last_active_at] Anchor timestamp after which the expiration policy applies. Supported anchors: `
  end
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module VectorStores
-
# Request parameters for cancelling a vector store file batch.
#
# @see OpenAI::Resources::VectorStores::FileBatches#cancel
class FileBatchCancelParams < OpenAI::Internal::Type::BaseModel
  extend OpenAI::Internal::Type::RequestParameters::Converter
  include OpenAI::Internal::Type::RequestParameters

  # @!attribute vector_store_id
  # The ID of the vector store that the file batch belongs to.
  #
  # @return [String]
  required :vector_store_id, String

  # @!method initialize(vector_store_id:, request_options: {})
  # @param vector_store_id [String]
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module VectorStores
-
# Request parameters for creating a vector store file batch.
#
# @see OpenAI::Resources::VectorStores::FileBatches#create
class FileBatchCreateParams < OpenAI::Internal::Type::BaseModel
  extend OpenAI::Internal::Type::RequestParameters::Converter
  include OpenAI::Internal::Type::RequestParameters

  # @!attribute file_ids
  # A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
  # the vector store should use. Useful for tools like `file_search` that can access
  # files.
  #
  # @return [Array<String>]
  required :file_ids, OpenAI::Internal::Type::ArrayOf[String]

  # @!attribute attributes
  # Set of 16 key-value pairs that can be attached to an object. This can be useful
  # for storing additional information about the object in a structured format, and
  # querying for objects via API or the dashboard. Keys are strings with a maximum
  # length of 64 characters. Values are strings with a maximum length of 512
  # characters, booleans, or numbers.
  #
  # @return [Hash{Symbol=>String, Float, Boolean}, nil]
  optional :attributes,
           -> {
             OpenAI::Internal::Type::HashOf[union: OpenAI::VectorStores::FileBatchCreateParams::Attribute]
           },
           nil?: true

  # @!attribute chunking_strategy
  # The chunking strategy used to chunk the file(s). If not set, will use the `auto`
  # strategy. Only applicable if `file_ids` is non-empty.
  #
  # @return [OpenAI::Models::AutoFileChunkingStrategyParam, OpenAI::Models::StaticFileChunkingStrategyObjectParam, nil]
  optional :chunking_strategy, union: -> { OpenAI::FileChunkingStrategyParam }

  # @!method initialize(file_ids:, attributes: nil, chunking_strategy: nil, request_options: {})
  # Some parameter documentation has been truncated, see
  # {OpenAI::Models::VectorStores::FileBatchCreateParams} for more details.
  #
  # @param file_ids [Array<String>] A list of [File](https://platform.openai.com/docs/api-reference/files) IDs that
  #
  # @param attributes [Hash{Symbol=>String, Float, Boolean}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
  #
  # @param chunking_strategy [OpenAI::Models::AutoFileChunkingStrategyParam, OpenAI::Models::StaticFileChunkingStrategyObjectParam] The chunking strategy used to chunk the file(s). If not set, will use the `auto`
  #
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]

  # The permitted value types for an entry in `attributes`.
  module Attribute
    extend OpenAI::Internal::Type::Union

    variant String

    variant Float

    variant OpenAI::Internal::Type::Boolean

    # @!method self.variants
    # @return [Array(String, Float, Boolean)]
  end
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module VectorStores
-
# Request parameters for listing the files in a vector store file batch, with
# cursor-based pagination and optional status filtering.
#
# @see OpenAI::Resources::VectorStores::FileBatches#list_files
class FileBatchListFilesParams < OpenAI::Internal::Type::BaseModel
  extend OpenAI::Internal::Type::RequestParameters::Converter
  include OpenAI::Internal::Type::RequestParameters

  # @!attribute vector_store_id
  # The ID of the vector store that the file batch belongs to.
  #
  # @return [String]
  required :vector_store_id, String

  # @!attribute after
  # A cursor for use in pagination. `after` is an object ID that defines your place
  # in the list. For instance, if you make a list request and receive 100 objects,
  # ending with obj_foo, your subsequent call can include after=obj_foo in order to
  # fetch the next page of the list.
  #
  # @return [String, nil]
  optional :after, String

  # @!attribute before
  # A cursor for use in pagination. `before` is an object ID that defines your place
  # in the list. For instance, if you make a list request and receive 100 objects,
  # starting with obj_foo, your subsequent call can include before=obj_foo in order
  # to fetch the previous page of the list.
  #
  # @return [String, nil]
  optional :before, String

  # @!attribute filter
  # Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.
  #
  # @return [Symbol, OpenAI::Models::VectorStores::FileBatchListFilesParams::Filter, nil]
  optional :filter, enum: -> { OpenAI::VectorStores::FileBatchListFilesParams::Filter }

  # @!attribute limit
  # A limit on the number of objects to be returned. Limit can range between 1 and
  # 100, and the default is 20.
  #
  # @return [Integer, nil]
  optional :limit, Integer

  # @!attribute order
  # Sort order by the `created_at` timestamp of the objects. `asc` for ascending
  # order and `desc` for descending order.
  #
  # @return [Symbol, OpenAI::Models::VectorStores::FileBatchListFilesParams::Order, nil]
  optional :order, enum: -> { OpenAI::VectorStores::FileBatchListFilesParams::Order }

  # @!method initialize(vector_store_id:, after: nil, before: nil, filter: nil, limit: nil, order: nil, request_options: {})
  # Some parameter documentation has been truncated, see
  # {OpenAI::Models::VectorStores::FileBatchListFilesParams} for more details.
  #
  # @param vector_store_id [String]
  #
  # @param after [String] A cursor for use in pagination. `after` is an object ID that defines your place
  #
  # @param before [String] A cursor for use in pagination. `before` is an object ID that defines your place
  #
  # @param filter [Symbol, OpenAI::Models::VectorStores::FileBatchListFilesParams::Filter] Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.
  #
  # @param limit [Integer] A limit on the number of objects to be returned. Limit can range between 1 and 1
  #
  # @param order [Symbol, OpenAI::Models::VectorStores::FileBatchListFilesParams::Order] Sort order by the `created_at` timestamp of the objects. `asc` for ascending ord
  #
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]

  # Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.
  module Filter
    extend OpenAI::Internal::Type::Enum

    IN_PROGRESS = :in_progress
    COMPLETED = :completed
    FAILED = :failed
    CANCELLED = :cancelled

    # @!method self.values
    # @return [Array<Symbol>]
  end

  # Sort order by the `created_at` timestamp of the objects. `asc` for ascending
  # order and `desc` for descending order.
  module Order
    extend OpenAI::Internal::Type::Enum

    ASC = :asc
    DESC = :desc

    # @!method self.values
    # @return [Array<Symbol>]
  end
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module VectorStores
-
# Request parameters for retrieving a vector store file batch.
#
# @see OpenAI::Resources::VectorStores::FileBatches#retrieve
class FileBatchRetrieveParams < OpenAI::Internal::Type::BaseModel
  extend OpenAI::Internal::Type::RequestParameters::Converter
  include OpenAI::Internal::Type::RequestParameters

  # @!attribute vector_store_id
  # The ID of the vector store that the file batch belongs to.
  #
  # @return [String]
  required :vector_store_id, String

  # @!method initialize(vector_store_id:, request_options: {})
  # @param vector_store_id [String]
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module VectorStores
-
# Request parameters for retrieving the parsed content of a vector store file.
#
# @see OpenAI::Resources::VectorStores::Files#content
class FileContentParams < OpenAI::Internal::Type::BaseModel
  extend OpenAI::Internal::Type::RequestParameters::Converter
  include OpenAI::Internal::Type::RequestParameters

  # @!attribute vector_store_id
  # The ID of the vector store that the file belongs to.
  #
  # @return [String]
  required :vector_store_id, String

  # @!method initialize(vector_store_id:, request_options: {})
  # @param vector_store_id [String]
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module VectorStores
-
# A chunk of parsed content returned from a vector store file.
#
# @see OpenAI::Resources::VectorStores::Files#content
class FileContentResponse < OpenAI::Internal::Type::BaseModel
  # @!attribute text
  # The text content
  #
  # @return [String, nil]
  optional :text, String

  # @!attribute type
  # The content type (currently only `"text"`)
  #
  # @return [String, nil]
  optional :type, String

  # @!method initialize(text: nil, type: nil)
  # @param text [String] The text content
  #
  # @param type [String] The content type (currently only `"text"`)
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module VectorStores
-
# Request parameters for attaching a file to a vector store.
#
# @see OpenAI::Resources::VectorStores::Files#create
class FileCreateParams < OpenAI::Internal::Type::BaseModel
  extend OpenAI::Internal::Type::RequestParameters::Converter
  include OpenAI::Internal::Type::RequestParameters

  # @!attribute file_id
  # A [File](https://platform.openai.com/docs/api-reference/files) ID that the
  # vector store should use. Useful for tools like `file_search` that can access
  # files.
  #
  # @return [String]
  required :file_id, String

  # @!attribute attributes
  # Set of 16 key-value pairs that can be attached to an object. This can be useful
  # for storing additional information about the object in a structured format, and
  # querying for objects via API or the dashboard. Keys are strings with a maximum
  # length of 64 characters. Values are strings with a maximum length of 512
  # characters, booleans, or numbers.
  #
  # @return [Hash{Symbol=>String, Float, Boolean}, nil]
  optional :attributes,
           -> {
             OpenAI::Internal::Type::HashOf[union: OpenAI::VectorStores::FileCreateParams::Attribute]
           },
           nil?: true

  # @!attribute chunking_strategy
  # The chunking strategy used to chunk the file(s). If not set, will use the `auto`
  # strategy. Only applicable if `file_ids` is non-empty.
  #
  # @return [OpenAI::Models::AutoFileChunkingStrategyParam, OpenAI::Models::StaticFileChunkingStrategyObjectParam, nil]
  optional :chunking_strategy, union: -> { OpenAI::FileChunkingStrategyParam }

  # @!method initialize(file_id:, attributes: nil, chunking_strategy: nil, request_options: {})
  # Some parameter documentation has been truncated, see
  # {OpenAI::Models::VectorStores::FileCreateParams} for more details.
  #
  # @param file_id [String] A [File](https://platform.openai.com/docs/api-reference/files) ID that the vecto
  #
  # @param attributes [Hash{Symbol=>String, Float, Boolean}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
  #
  # @param chunking_strategy [OpenAI::Models::AutoFileChunkingStrategyParam, OpenAI::Models::StaticFileChunkingStrategyObjectParam] The chunking strategy used to chunk the file(s). If not set, will use the `auto`
  #
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]

  # The permitted value types for an entry in `attributes`.
  module Attribute
    extend OpenAI::Internal::Type::Union

    variant String

    variant Float

    variant OpenAI::Internal::Type::Boolean

    # @!method self.variants
    # @return [Array(String, Float, Boolean)]
  end
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module VectorStores
-
# Request parameters for removing a file from a vector store.
#
# @see OpenAI::Resources::VectorStores::Files#delete
class FileDeleteParams < OpenAI::Internal::Type::BaseModel
  extend OpenAI::Internal::Type::RequestParameters::Converter
  include OpenAI::Internal::Type::RequestParameters

  # @!attribute vector_store_id
  # The ID of the vector store that the file belongs to.
  #
  # @return [String]
  required :vector_store_id, String

  # @!method initialize(vector_store_id:, request_options: {})
  # @param vector_store_id [String]
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module VectorStores
-
# Request parameters for listing the files in a vector store, with cursor-based
# pagination and optional status filtering.
#
# @see OpenAI::Resources::VectorStores::Files#list
class FileListParams < OpenAI::Internal::Type::BaseModel
  extend OpenAI::Internal::Type::RequestParameters::Converter
  include OpenAI::Internal::Type::RequestParameters

  # @!attribute after
  # A cursor for use in pagination. `after` is an object ID that defines your place
  # in the list. For instance, if you make a list request and receive 100 objects,
  # ending with obj_foo, your subsequent call can include after=obj_foo in order to
  # fetch the next page of the list.
  #
  # @return [String, nil]
  optional :after, String

  # @!attribute before
  # A cursor for use in pagination. `before` is an object ID that defines your place
  # in the list. For instance, if you make a list request and receive 100 objects,
  # starting with obj_foo, your subsequent call can include before=obj_foo in order
  # to fetch the previous page of the list.
  #
  # @return [String, nil]
  optional :before, String

  # @!attribute filter
  # Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.
  #
  # @return [Symbol, OpenAI::Models::VectorStores::FileListParams::Filter, nil]
  optional :filter, enum: -> { OpenAI::VectorStores::FileListParams::Filter }

  # @!attribute limit
  # A limit on the number of objects to be returned. Limit can range between 1 and
  # 100, and the default is 20.
  #
  # @return [Integer, nil]
  optional :limit, Integer

  # @!attribute order
  # Sort order by the `created_at` timestamp of the objects. `asc` for ascending
  # order and `desc` for descending order.
  #
  # @return [Symbol, OpenAI::Models::VectorStores::FileListParams::Order, nil]
  optional :order, enum: -> { OpenAI::VectorStores::FileListParams::Order }

  # @!method initialize(after: nil, before: nil, filter: nil, limit: nil, order: nil, request_options: {})
  # Some parameter documentation has been truncated, see
  # {OpenAI::Models::VectorStores::FileListParams} for more details.
  #
  # @param after [String] A cursor for use in pagination. `after` is an object ID that defines your place
  #
  # @param before [String] A cursor for use in pagination. `before` is an object ID that defines your place
  #
  # @param filter [Symbol, OpenAI::Models::VectorStores::FileListParams::Filter] Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.
  #
  # @param limit [Integer] A limit on the number of objects to be returned. Limit can range between 1 and 1
  #
  # @param order [Symbol, OpenAI::Models::VectorStores::FileListParams::Order] Sort order by the `created_at` timestamp of the objects. `asc` for ascending ord
  #
  # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]

  # Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.
  module Filter
    extend OpenAI::Internal::Type::Enum

    IN_PROGRESS = :in_progress
    COMPLETED = :completed
    FAILED = :failed
    CANCELLED = :cancelled

    # @!method self.values
    # @return [Array<Symbol>]
  end

  # Sort order by the `created_at` timestamp of the objects. `asc` for ascending
  # order and `desc` for descending order.
  module Order
    extend OpenAI::Internal::Type::Enum

    ASC = :asc
    DESC = :desc

    # @!method self.values
    # @return [Array<Symbol>]
  end
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module VectorStores
-
# @see OpenAI::Resources::VectorStores::Files#retrieve
-
1
class FileRetrieveParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!attribute vector_store_id
-
#
-
# @return [String]
-
1
required :vector_store_id, String
-
-
# @!method initialize(vector_store_id:, request_options: {})
-
# @param vector_store_id [String]
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module VectorStores
-
# @see OpenAI::Resources::VectorStores::Files#update
-
1
class FileUpdateParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!attribute vector_store_id
-
#
-
# @return [String]
-
1
required :vector_store_id, String
-
-
# @!attribute attributes
-
# Set of 16 key-value pairs that can be attached to an object. This can be useful
-
# for storing additional information about the object in a structured format, and
-
# querying for objects via API or the dashboard. Keys are strings with a maximum
-
# length of 64 characters. Values are strings with a maximum length of 512
-
# characters, booleans, or numbers.
-
#
-
# @return [Hash{Symbol=>String, Float, Boolean}, nil]
-
1
required :attributes,
-
-> {
-
OpenAI::Internal::Type::HashOf[union: OpenAI::VectorStores::FileUpdateParams::Attribute]
-
},
-
nil?: true
-
-
# @!method initialize(vector_store_id:, attributes:, request_options: {})
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::VectorStores::FileUpdateParams} for more details.
-
#
-
# @param vector_store_id [String]
-
#
-
# @param attributes [Hash{Symbol=>String, Float, Boolean}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
-
1
module Attribute
-
1
extend OpenAI::Internal::Type::Union
-
-
1
variant String
-
-
1
variant Float
-
-
1
variant OpenAI::Internal::Type::Boolean
-
-
# @!method self.variants
-
# @return [Array(String, Float, Boolean)]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module VectorStores
-
# @see OpenAI::Resources::VectorStores::Files#create
-
1
class VectorStoreFile < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The identifier, which can be referenced in API endpoints.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute created_at
-
# The Unix timestamp (in seconds) for when the vector store file was created.
-
#
-
# @return [Integer]
-
1
required :created_at, Integer
-
-
# @!attribute last_error
-
# The last error associated with this vector store file. Will be `null` if there
-
# are no errors.
-
#
-
# @return [OpenAI::Models::VectorStores::VectorStoreFile::LastError, nil]
-
1
required :last_error, -> { OpenAI::VectorStores::VectorStoreFile::LastError }, nil?: true
-
-
# @!attribute object
-
# The object type, which is always `vector_store.file`.
-
#
-
# @return [Symbol, :"vector_store.file"]
-
1
required :object, const: :"vector_store.file"
-
-
# @!attribute status
-
# The status of the vector store file, which can be either `in_progress`,
-
# `completed`, `cancelled`, or `failed`. The status `completed` indicates that the
-
# vector store file is ready for use.
-
#
-
# @return [Symbol, OpenAI::Models::VectorStores::VectorStoreFile::Status]
-
1
required :status, enum: -> { OpenAI::VectorStores::VectorStoreFile::Status }
-
-
# @!attribute usage_bytes
-
# The total vector store usage in bytes. Note that this may be different from the
-
# original file size.
-
#
-
# @return [Integer]
-
1
required :usage_bytes, Integer
-
-
# @!attribute vector_store_id
-
# The ID of the
-
# [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
-
# that the [File](https://platform.openai.com/docs/api-reference/files) is
-
# attached to.
-
#
-
# @return [String]
-
1
required :vector_store_id, String
-
-
# @!attribute attributes
-
# Set of 16 key-value pairs that can be attached to an object. This can be useful
-
# for storing additional information about the object in a structured format, and
-
# querying for objects via API or the dashboard. Keys are strings with a maximum
-
# length of 64 characters. Values are strings with a maximum length of 512
-
# characters, booleans, or numbers.
-
#
-
# @return [Hash{Symbol=>String, Float, Boolean}, nil]
-
1
optional :attributes,
-
-> {
-
OpenAI::Internal::Type::HashOf[union: OpenAI::VectorStores::VectorStoreFile::Attribute]
-
},
-
nil?: true
-
-
# @!attribute chunking_strategy
-
# The strategy used to chunk the file.
-
#
-
# @return [OpenAI::Models::StaticFileChunkingStrategyObject, OpenAI::Models::OtherFileChunkingStrategyObject, nil]
-
1
optional :chunking_strategy, union: -> { OpenAI::FileChunkingStrategy }
-
-
# @!method initialize(id:, created_at:, last_error:, status:, usage_bytes:, vector_store_id:, attributes: nil, chunking_strategy: nil, object: :"vector_store.file")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::VectorStores::VectorStoreFile} for more details.
-
#
-
# A list of files attached to a vector store.
-
#
-
# @param id [String] The identifier, which can be referenced in API endpoints.
-
#
-
# @param created_at [Integer] The Unix timestamp (in seconds) for when the vector store file was created.
-
#
-
# @param last_error [OpenAI::Models::VectorStores::VectorStoreFile::LastError, nil] The last error associated with this vector store file. Will be `null` if there a
-
#
-
# @param status [Symbol, OpenAI::Models::VectorStores::VectorStoreFile::Status] The status of the vector store file, which can be either `in_progress`, `complet
-
#
-
# @param usage_bytes [Integer] The total vector store usage in bytes. Note that this may be different from the
-
#
-
# @param vector_store_id [String] The ID of the [vector store](https://platform.openai.com/docs/api-reference/vect
-
#
-
# @param attributes [Hash{Symbol=>String, Float, Boolean}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
#
-
# @param chunking_strategy [OpenAI::Models::StaticFileChunkingStrategyObject, OpenAI::Models::OtherFileChunkingStrategyObject] The strategy used to chunk the file.
-
#
-
# @param object [Symbol, :"vector_store.file"] The object type, which is always `vector_store.file`.
-
-
# @see OpenAI::Models::VectorStores::VectorStoreFile#last_error
-
1
class LastError < OpenAI::Internal::Type::BaseModel
-
# @!attribute code
-
# One of `server_error`, `unsupported_file`, or `invalid_file`.
-
#
-
# @return [Symbol, OpenAI::Models::VectorStores::VectorStoreFile::LastError::Code]
-
1
required :code, enum: -> { OpenAI::VectorStores::VectorStoreFile::LastError::Code }
-
-
# @!attribute message
-
# A human-readable description of the error.
-
#
-
# @return [String]
-
1
required :message, String
-
-
# @!method initialize(code:, message:)
-
# The last error associated with this vector store file. Will be `null` if there
-
# are no errors.
-
#
-
# @param code [Symbol, OpenAI::Models::VectorStores::VectorStoreFile::LastError::Code] One of `server_error`, `unsupported_file`, or `invalid_file`.
-
#
-
# @param message [String] A human-readable description of the error.
-
-
# One of `server_error`, `unsupported_file`, or `invalid_file`.
-
#
-
# @see OpenAI::Models::VectorStores::VectorStoreFile::LastError#code
-
1
module Code
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
SERVER_ERROR = :server_error
-
1
UNSUPPORTED_FILE = :unsupported_file
-
1
INVALID_FILE = :invalid_file
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
-
# The status of the vector store file, which can be either `in_progress`,
-
# `completed`, `cancelled`, or `failed`. The status `completed` indicates that the
-
# vector store file is ready for use.
-
#
-
# @see OpenAI::Models::VectorStores::VectorStoreFile#status
-
1
module Status
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
IN_PROGRESS = :in_progress
-
1
COMPLETED = :completed
-
1
CANCELLED = :cancelled
-
1
FAILED = :failed
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
-
1
module Attribute
-
1
extend OpenAI::Internal::Type::Union
-
-
1
variant String
-
-
1
variant Float
-
-
1
variant OpenAI::Internal::Type::Boolean
-
-
# @!method self.variants
-
# @return [Array(String, Float, Boolean)]
-
end
-
end
-
end
-
-
1
VectorStoreFile = VectorStores::VectorStoreFile
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module VectorStores
-
# @see OpenAI::Resources::VectorStores::FileBatches#create
-
1
class VectorStoreFileBatch < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The identifier, which can be referenced in API endpoints.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute created_at
-
# The Unix timestamp (in seconds) for when the vector store files batch was
-
# created.
-
#
-
# @return [Integer]
-
1
required :created_at, Integer
-
-
# @!attribute file_counts
-
#
-
# @return [OpenAI::Models::VectorStores::VectorStoreFileBatch::FileCounts]
-
1
required :file_counts, -> { OpenAI::VectorStores::VectorStoreFileBatch::FileCounts }
-
-
# @!attribute object
-
# The object type, which is always `vector_store.files_batch`.
-
#
-
# @return [Symbol, :"vector_store.files_batch"]
-
1
required :object, const: :"vector_store.files_batch"
-
-
# @!attribute status
-
# The status of the vector store files batch, which can be either `in_progress`,
-
# `completed`, `cancelled` or `failed`.
-
#
-
# @return [Symbol, OpenAI::Models::VectorStores::VectorStoreFileBatch::Status]
-
1
required :status, enum: -> { OpenAI::VectorStores::VectorStoreFileBatch::Status }
-
-
# @!attribute vector_store_id
-
# The ID of the
-
# [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object)
-
# that the [File](https://platform.openai.com/docs/api-reference/files) is
-
# attached to.
-
#
-
# @return [String]
-
1
required :vector_store_id, String
-
-
# @!method initialize(id:, created_at:, file_counts:, status:, vector_store_id:, object: :"vector_store.files_batch")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::VectorStores::VectorStoreFileBatch} for more details.
-
#
-
# A batch of files attached to a vector store.
-
#
-
# @param id [String] The identifier, which can be referenced in API endpoints.
-
#
-
# @param created_at [Integer] The Unix timestamp (in seconds) for when the vector store files batch was create
-
#
-
# @param file_counts [OpenAI::Models::VectorStores::VectorStoreFileBatch::FileCounts]
-
#
-
# @param status [Symbol, OpenAI::Models::VectorStores::VectorStoreFileBatch::Status] The status of the vector store files batch, which can be either `in_progress`, `
-
#
-
# @param vector_store_id [String] The ID of the [vector store](https://platform.openai.com/docs/api-reference/vect
-
#
-
# @param object [Symbol, :"vector_store.files_batch"] The object type, which is always `vector_store.files_batch`.
-
-
# @see OpenAI::Models::VectorStores::VectorStoreFileBatch#file_counts
-
1
class FileCounts < OpenAI::Internal::Type::BaseModel
-
# @!attribute cancelled
-
# The number of files that were cancelled.
-
#
-
# @return [Integer]
-
1
required :cancelled, Integer
-
-
# @!attribute completed
-
# The number of files that have been processed.
-
#
-
# @return [Integer]
-
1
required :completed, Integer
-
-
# @!attribute failed
-
# The number of files that have failed to process.
-
#
-
# @return [Integer]
-
1
required :failed, Integer
-
-
# @!attribute in_progress
-
# The number of files that are currently being processed.
-
#
-
# @return [Integer]
-
1
required :in_progress, Integer
-
-
# @!attribute total
-
# The total number of files.
-
#
-
# @return [Integer]
-
1
required :total, Integer
-
-
# @!method initialize(cancelled:, completed:, failed:, in_progress:, total:)
-
# @param cancelled [Integer] The number of files that were cancelled.
-
#
-
# @param completed [Integer] The number of files that have been processed.
-
#
-
# @param failed [Integer] The number of files that have failed to process.
-
#
-
# @param in_progress [Integer] The number of files that are currently being processed.
-
#
-
# @param total [Integer] The total number of files.
-
end
-
-
# The status of the vector store files batch, which can be either `in_progress`,
-
# `completed`, `cancelled` or `failed`.
-
#
-
# @see OpenAI::Models::VectorStores::VectorStoreFileBatch#status
-
1
module Status
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
IN_PROGRESS = :in_progress
-
1
COMPLETED = :completed
-
1
CANCELLED = :cancelled
-
1
FAILED = :failed
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
-
1
VectorStoreFileBatch = VectorStores::VectorStoreFileBatch
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module VectorStores
-
# @see OpenAI::Resources::VectorStores::Files#delete
-
1
class VectorStoreFileDeleted < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute deleted
-
#
-
# @return [Boolean]
-
1
required :deleted, OpenAI::Internal::Type::Boolean
-
-
# @!attribute object
-
#
-
# @return [Symbol, :"vector_store.file.deleted"]
-
1
required :object, const: :"vector_store.file.deleted"
-
-
# @!method initialize(id:, deleted:, object: :"vector_store.file.deleted")
-
# @param id [String]
-
# @param deleted [Boolean]
-
# @param object [Symbol, :"vector_store.file.deleted"]
-
end
-
end
-
-
1
VectorStoreFileDeleted = VectorStores::VectorStoreFileDeleted
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Webhooks
-
1
class BatchCancelledWebhookEvent < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the event.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute created_at
-
# The Unix timestamp (in seconds) of when the batch API request was cancelled.
-
#
-
# @return [Integer]
-
1
required :created_at, Integer
-
-
# @!attribute data
-
# Event data payload.
-
#
-
# @return [OpenAI::Models::Webhooks::BatchCancelledWebhookEvent::Data]
-
1
required :data, -> { OpenAI::Webhooks::BatchCancelledWebhookEvent::Data }
-
-
# @!attribute type
-
# The type of the event. Always `batch.cancelled`.
-
#
-
# @return [Symbol, :"batch.cancelled"]
-
1
required :type, const: :"batch.cancelled"
-
-
# @!attribute object
-
# The object of the event. Always `event`.
-
#
-
# @return [Symbol, OpenAI::Models::Webhooks::BatchCancelledWebhookEvent::Object, nil]
-
1
optional :object, enum: -> { OpenAI::Webhooks::BatchCancelledWebhookEvent::Object }
-
-
# @!method initialize(id:, created_at:, data:, object: nil, type: :"batch.cancelled")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Webhooks::BatchCancelledWebhookEvent} for more details.
-
#
-
# Sent when a batch API request has been cancelled.
-
#
-
# @param id [String] The unique ID of the event.
-
#
-
# @param created_at [Integer] The Unix timestamp (in seconds) of when the batch API request was cancelled.
-
#
-
# @param data [OpenAI::Models::Webhooks::BatchCancelledWebhookEvent::Data] Event data payload.
-
#
-
# @param object [Symbol, OpenAI::Models::Webhooks::BatchCancelledWebhookEvent::Object] The object of the event. Always `event`.
-
#
-
# @param type [Symbol, :"batch.cancelled"] The type of the event. Always `batch.cancelled`.
-
-
# @see OpenAI::Models::Webhooks::BatchCancelledWebhookEvent#data
-
1
class Data < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the batch API request.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!method initialize(id:)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Webhooks::BatchCancelledWebhookEvent::Data} for more details.
-
#
-
# Event data payload.
-
#
-
# @param id [String] The unique ID of the batch API request.
-
end
-
-
# The object of the event. Always `event`.
-
#
-
# @see OpenAI::Models::Webhooks::BatchCancelledWebhookEvent#object
-
1
module Object
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
EVENT = :event
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Webhooks
-
1
class BatchCompletedWebhookEvent < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the event.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute created_at
-
# The Unix timestamp (in seconds) of when the batch API request was completed.
-
#
-
# @return [Integer]
-
1
required :created_at, Integer
-
-
# @!attribute data
-
# Event data payload.
-
#
-
# @return [OpenAI::Models::Webhooks::BatchCompletedWebhookEvent::Data]
-
1
required :data, -> { OpenAI::Webhooks::BatchCompletedWebhookEvent::Data }
-
-
# @!attribute type
-
# The type of the event. Always `batch.completed`.
-
#
-
# @return [Symbol, :"batch.completed"]
-
1
required :type, const: :"batch.completed"
-
-
# @!attribute object
-
# The object of the event. Always `event`.
-
#
-
# @return [Symbol, OpenAI::Models::Webhooks::BatchCompletedWebhookEvent::Object, nil]
-
1
optional :object, enum: -> { OpenAI::Webhooks::BatchCompletedWebhookEvent::Object }
-
-
# @!method initialize(id:, created_at:, data:, object: nil, type: :"batch.completed")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Webhooks::BatchCompletedWebhookEvent} for more details.
-
#
-
# Sent when a batch API request has been completed.
-
#
-
# @param id [String] The unique ID of the event.
-
#
-
# @param created_at [Integer] The Unix timestamp (in seconds) of when the batch API request was completed.
-
#
-
# @param data [OpenAI::Models::Webhooks::BatchCompletedWebhookEvent::Data] Event data payload.
-
#
-
# @param object [Symbol, OpenAI::Models::Webhooks::BatchCompletedWebhookEvent::Object] The object of the event. Always `event`.
-
#
-
# @param type [Symbol, :"batch.completed"] The type of the event. Always `batch.completed`.
-
-
# @see OpenAI::Models::Webhooks::BatchCompletedWebhookEvent#data
-
1
class Data < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the batch API request.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!method initialize(id:)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Webhooks::BatchCompletedWebhookEvent::Data} for more details.
-
#
-
# Event data payload.
-
#
-
# @param id [String] The unique ID of the batch API request.
-
end
-
-
# The object of the event. Always `event`.
-
#
-
# @see OpenAI::Models::Webhooks::BatchCompletedWebhookEvent#object
-
1
module Object
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
EVENT = :event
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Webhooks
-
1
class BatchExpiredWebhookEvent < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the event.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute created_at
-
# The Unix timestamp (in seconds) of when the batch API request expired.
-
#
-
# @return [Integer]
-
1
required :created_at, Integer
-
-
# @!attribute data
-
# Event data payload.
-
#
-
# @return [OpenAI::Models::Webhooks::BatchExpiredWebhookEvent::Data]
-
1
required :data, -> { OpenAI::Webhooks::BatchExpiredWebhookEvent::Data }
-
-
# @!attribute type
-
# The type of the event. Always `batch.expired`.
-
#
-
# @return [Symbol, :"batch.expired"]
-
1
required :type, const: :"batch.expired"
-
-
# @!attribute object
-
# The object of the event. Always `event`.
-
#
-
# @return [Symbol, OpenAI::Models::Webhooks::BatchExpiredWebhookEvent::Object, nil]
-
1
optional :object, enum: -> { OpenAI::Webhooks::BatchExpiredWebhookEvent::Object }
-
-
# @!method initialize(id:, created_at:, data:, object: nil, type: :"batch.expired")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Webhooks::BatchExpiredWebhookEvent} for more details.
-
#
-
# Sent when a batch API request has expired.
-
#
-
# @param id [String] The unique ID of the event.
-
#
-
# @param created_at [Integer] The Unix timestamp (in seconds) of when the batch API request expired.
-
#
-
# @param data [OpenAI::Models::Webhooks::BatchExpiredWebhookEvent::Data] Event data payload.
-
#
-
# @param object [Symbol, OpenAI::Models::Webhooks::BatchExpiredWebhookEvent::Object] The object of the event. Always `event`.
-
#
-
# @param type [Symbol, :"batch.expired"] The type of the event. Always `batch.expired`.
-
-
# @see OpenAI::Models::Webhooks::BatchExpiredWebhookEvent#data
-
1
class Data < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the batch API request.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!method initialize(id:)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Webhooks::BatchExpiredWebhookEvent::Data} for more details.
-
#
-
# Event data payload.
-
#
-
# @param id [String] The unique ID of the batch API request.
-
end
-
-
# The object of the event. Always `event`.
-
#
-
# @see OpenAI::Models::Webhooks::BatchExpiredWebhookEvent#object
-
1
module Object
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
EVENT = :event
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Webhooks
-
1
class BatchFailedWebhookEvent < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the event.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute created_at
-
# The Unix timestamp (in seconds) of when the batch API request failed.
-
#
-
# @return [Integer]
-
1
required :created_at, Integer
-
-
# @!attribute data
-
# Event data payload.
-
#
-
# @return [OpenAI::Models::Webhooks::BatchFailedWebhookEvent::Data]
-
1
required :data, -> { OpenAI::Webhooks::BatchFailedWebhookEvent::Data }
-
-
# @!attribute type
-
# The type of the event. Always `batch.failed`.
-
#
-
# @return [Symbol, :"batch.failed"]
-
1
required :type, const: :"batch.failed"
-
-
# @!attribute object
-
# The object of the event. Always `event`.
-
#
-
# @return [Symbol, OpenAI::Models::Webhooks::BatchFailedWebhookEvent::Object, nil]
-
1
optional :object, enum: -> { OpenAI::Webhooks::BatchFailedWebhookEvent::Object }
-
-
# @!method initialize(id:, created_at:, data:, object: nil, type: :"batch.failed")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Webhooks::BatchFailedWebhookEvent} for more details.
-
#
-
# Sent when a batch API request has failed.
-
#
-
# @param id [String] The unique ID of the event.
-
#
-
# @param created_at [Integer] The Unix timestamp (in seconds) of when the batch API request failed.
-
#
-
# @param data [OpenAI::Models::Webhooks::BatchFailedWebhookEvent::Data] Event data payload.
-
#
-
# @param object [Symbol, OpenAI::Models::Webhooks::BatchFailedWebhookEvent::Object] The object of the event. Always `event`.
-
#
-
# @param type [Symbol, :"batch.failed"] The type of the event. Always `batch.failed`.
-
-
# @see OpenAI::Models::Webhooks::BatchFailedWebhookEvent#data
-
1
class Data < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the batch API request.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!method initialize(id:)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Webhooks::BatchFailedWebhookEvent::Data} for more details.
-
#
-
# Event data payload.
-
#
-
# @param id [String] The unique ID of the batch API request.
-
end
-
-
# The object of the event. Always `event`.
-
#
-
# @see OpenAI::Models::Webhooks::BatchFailedWebhookEvent#object
-
1
module Object
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
EVENT = :event
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Webhooks
-
1
class EvalRunCanceledWebhookEvent < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the event.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute created_at
-
# The Unix timestamp (in seconds) of when the eval run was canceled.
-
#
-
# @return [Integer]
-
1
required :created_at, Integer
-
-
# @!attribute data
-
# Event data payload.
-
#
-
# @return [OpenAI::Models::Webhooks::EvalRunCanceledWebhookEvent::Data]
-
1
required :data, -> { OpenAI::Webhooks::EvalRunCanceledWebhookEvent::Data }
-
-
# @!attribute type
-
# The type of the event. Always `eval.run.canceled`.
-
#
-
# @return [Symbol, :"eval.run.canceled"]
-
1
required :type, const: :"eval.run.canceled"
-
-
# @!attribute object
-
# The object of the event. Always `event`.
-
#
-
# @return [Symbol, OpenAI::Models::Webhooks::EvalRunCanceledWebhookEvent::Object, nil]
-
1
optional :object, enum: -> { OpenAI::Webhooks::EvalRunCanceledWebhookEvent::Object }
-
-
# @!method initialize(id:, created_at:, data:, object: nil, type: :"eval.run.canceled")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Webhooks::EvalRunCanceledWebhookEvent} for more details.
-
#
-
# Sent when an eval run has been canceled.
-
#
-
# @param id [String] The unique ID of the event.
-
#
-
# @param created_at [Integer] The Unix timestamp (in seconds) of when the eval run was canceled.
-
#
-
# @param data [OpenAI::Models::Webhooks::EvalRunCanceledWebhookEvent::Data] Event data payload.
-
#
-
# @param object [Symbol, OpenAI::Models::Webhooks::EvalRunCanceledWebhookEvent::Object] The object of the event. Always `event`.
-
#
-
# @param type [Symbol, :"eval.run.canceled"] The type of the event. Always `eval.run.canceled`.
-
-
# @see OpenAI::Models::Webhooks::EvalRunCanceledWebhookEvent#data
-
1
class Data < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the eval run.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!method initialize(id:)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Webhooks::EvalRunCanceledWebhookEvent::Data} for more details.
-
#
-
# Event data payload.
-
#
-
# @param id [String] The unique ID of the eval run.
-
end
-
-
# The object of the event. Always `event`.
-
#
-
# @see OpenAI::Models::Webhooks::EvalRunCanceledWebhookEvent#object
-
1
module Object
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
EVENT = :event
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Webhooks
-
1
class EvalRunFailedWebhookEvent < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the event.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute created_at
-
# The Unix timestamp (in seconds) of when the eval run failed.
-
#
-
# @return [Integer]
-
1
required :created_at, Integer
-
-
# @!attribute data
-
# Event data payload.
-
#
-
# @return [OpenAI::Models::Webhooks::EvalRunFailedWebhookEvent::Data]
-
1
required :data, -> { OpenAI::Webhooks::EvalRunFailedWebhookEvent::Data }
-
-
# @!attribute type
-
# The type of the event. Always `eval.run.failed`.
-
#
-
# @return [Symbol, :"eval.run.failed"]
-
1
required :type, const: :"eval.run.failed"
-
-
# @!attribute object
-
# The object of the event. Always `event`.
-
#
-
# @return [Symbol, OpenAI::Models::Webhooks::EvalRunFailedWebhookEvent::Object, nil]
-
1
optional :object, enum: -> { OpenAI::Webhooks::EvalRunFailedWebhookEvent::Object }
-
-
# @!method initialize(id:, created_at:, data:, object: nil, type: :"eval.run.failed")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Webhooks::EvalRunFailedWebhookEvent} for more details.
-
#
-
# Sent when an eval run has failed.
-
#
-
# @param id [String] The unique ID of the event.
-
#
-
# @param created_at [Integer] The Unix timestamp (in seconds) of when the eval run failed.
-
#
-
# @param data [OpenAI::Models::Webhooks::EvalRunFailedWebhookEvent::Data] Event data payload.
-
#
-
# @param object [Symbol, OpenAI::Models::Webhooks::EvalRunFailedWebhookEvent::Object] The object of the event. Always `event`.
-
#
-
# @param type [Symbol, :"eval.run.failed"] The type of the event. Always `eval.run.failed`.
-
-
# @see OpenAI::Models::Webhooks::EvalRunFailedWebhookEvent#data
-
1
class Data < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the eval run.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!method initialize(id:)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Webhooks::EvalRunFailedWebhookEvent::Data} for more details.
-
#
-
# Event data payload.
-
#
-
# @param id [String] The unique ID of the eval run.
-
end
-
-
# The object of the event. Always `event`.
-
#
-
# @see OpenAI::Models::Webhooks::EvalRunFailedWebhookEvent#object
-
1
module Object
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
EVENT = :event
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Webhooks
-
1
class EvalRunSucceededWebhookEvent < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the event.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute created_at
-
# The Unix timestamp (in seconds) of when the eval run succeeded.
-
#
-
# @return [Integer]
-
1
required :created_at, Integer
-
-
# @!attribute data
-
# Event data payload.
-
#
-
# @return [OpenAI::Models::Webhooks::EvalRunSucceededWebhookEvent::Data]
-
1
required :data, -> { OpenAI::Webhooks::EvalRunSucceededWebhookEvent::Data }
-
-
# @!attribute type
-
# The type of the event. Always `eval.run.succeeded`.
-
#
-
# @return [Symbol, :"eval.run.succeeded"]
-
1
required :type, const: :"eval.run.succeeded"
-
-
# @!attribute object
-
# The object of the event. Always `event`.
-
#
-
# @return [Symbol, OpenAI::Models::Webhooks::EvalRunSucceededWebhookEvent::Object, nil]
-
1
optional :object, enum: -> { OpenAI::Webhooks::EvalRunSucceededWebhookEvent::Object }
-
-
# @!method initialize(id:, created_at:, data:, object: nil, type: :"eval.run.succeeded")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Webhooks::EvalRunSucceededWebhookEvent} for more details.
-
#
-
# Sent when an eval run has succeeded.
-
#
-
# @param id [String] The unique ID of the event.
-
#
-
# @param created_at [Integer] The Unix timestamp (in seconds) of when the eval run succeeded.
-
#
-
# @param data [OpenAI::Models::Webhooks::EvalRunSucceededWebhookEvent::Data] Event data payload.
-
#
-
# @param object [Symbol, OpenAI::Models::Webhooks::EvalRunSucceededWebhookEvent::Object] The object of the event. Always `event`.
-
#
-
# @param type [Symbol, :"eval.run.succeeded"] The type of the event. Always `eval.run.succeeded`.
-
-
# @see OpenAI::Models::Webhooks::EvalRunSucceededWebhookEvent#data
-
1
class Data < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the eval run.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!method initialize(id:)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Webhooks::EvalRunSucceededWebhookEvent::Data} for more details.
-
#
-
# Event data payload.
-
#
-
# @param id [String] The unique ID of the eval run.
-
end
-
-
# The object of the event. Always `event`.
-
#
-
# @see OpenAI::Models::Webhooks::EvalRunSucceededWebhookEvent#object
-
1
module Object
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
EVENT = :event
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Webhooks
-
1
class FineTuningJobCancelledWebhookEvent < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the event.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute created_at
-
# The Unix timestamp (in seconds) of when the fine-tuning job was cancelled.
-
#
-
# @return [Integer]
-
1
required :created_at, Integer
-
-
# @!attribute data
-
# Event data payload.
-
#
-
# @return [OpenAI::Models::Webhooks::FineTuningJobCancelledWebhookEvent::Data]
-
1
required :data, -> { OpenAI::Webhooks::FineTuningJobCancelledWebhookEvent::Data }
-
-
# @!attribute type
-
# The type of the event. Always `fine_tuning.job.cancelled`.
-
#
-
# @return [Symbol, :"fine_tuning.job.cancelled"]
-
1
required :type, const: :"fine_tuning.job.cancelled"
-
-
# @!attribute object
-
# The object of the event. Always `event`.
-
#
-
# @return [Symbol, OpenAI::Models::Webhooks::FineTuningJobCancelledWebhookEvent::Object, nil]
-
1
optional :object, enum: -> { OpenAI::Webhooks::FineTuningJobCancelledWebhookEvent::Object }
-
-
# @!method initialize(id:, created_at:, data:, object: nil, type: :"fine_tuning.job.cancelled")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Webhooks::FineTuningJobCancelledWebhookEvent} for more details.
-
#
-
# Sent when a fine-tuning job has been cancelled.
-
#
-
# @param id [String] The unique ID of the event.
-
#
-
# @param created_at [Integer] The Unix timestamp (in seconds) of when the fine-tuning job was cancelled.
-
#
-
# @param data [OpenAI::Models::Webhooks::FineTuningJobCancelledWebhookEvent::Data] Event data payload.
-
#
-
# @param object [Symbol, OpenAI::Models::Webhooks::FineTuningJobCancelledWebhookEvent::Object] The object of the event. Always `event`.
-
#
-
# @param type [Symbol, :"fine_tuning.job.cancelled"] The type of the event. Always `fine_tuning.job.cancelled`.
-
-
# @see OpenAI::Models::Webhooks::FineTuningJobCancelledWebhookEvent#data
-
1
class Data < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the fine-tuning job.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!method initialize(id:)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Webhooks::FineTuningJobCancelledWebhookEvent::Data} for more
-
# details.
-
#
-
# Event data payload.
-
#
-
# @param id [String] The unique ID of the fine-tuning job.
-
end
-
-
# The object of the event. Always `event`.
-
#
-
# @see OpenAI::Models::Webhooks::FineTuningJobCancelledWebhookEvent#object
-
1
module Object
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
EVENT = :event
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Webhooks
-
1
class FineTuningJobFailedWebhookEvent < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the event.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute created_at
-
# The Unix timestamp (in seconds) of when the fine-tuning job failed.
-
#
-
# @return [Integer]
-
1
required :created_at, Integer
-
-
# @!attribute data
-
# Event data payload.
-
#
-
# @return [OpenAI::Models::Webhooks::FineTuningJobFailedWebhookEvent::Data]
-
1
required :data, -> { OpenAI::Webhooks::FineTuningJobFailedWebhookEvent::Data }
-
-
# @!attribute type
-
# The type of the event. Always `fine_tuning.job.failed`.
-
#
-
# @return [Symbol, :"fine_tuning.job.failed"]
-
1
required :type, const: :"fine_tuning.job.failed"
-
-
# @!attribute object
-
# The object of the event. Always `event`.
-
#
-
# @return [Symbol, OpenAI::Models::Webhooks::FineTuningJobFailedWebhookEvent::Object, nil]
-
1
optional :object, enum: -> { OpenAI::Webhooks::FineTuningJobFailedWebhookEvent::Object }
-
-
# @!method initialize(id:, created_at:, data:, object: nil, type: :"fine_tuning.job.failed")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Webhooks::FineTuningJobFailedWebhookEvent} for more details.
-
#
-
# Sent when a fine-tuning job has failed.
-
#
-
# @param id [String] The unique ID of the event.
-
#
-
# @param created_at [Integer] The Unix timestamp (in seconds) of when the fine-tuning job failed.
-
#
-
# @param data [OpenAI::Models::Webhooks::FineTuningJobFailedWebhookEvent::Data] Event data payload.
-
#
-
# @param object [Symbol, OpenAI::Models::Webhooks::FineTuningJobFailedWebhookEvent::Object] The object of the event. Always `event`.
-
#
-
# @param type [Symbol, :"fine_tuning.job.failed"] The type of the event. Always `fine_tuning.job.failed`.
-
-
# @see OpenAI::Models::Webhooks::FineTuningJobFailedWebhookEvent#data
-
1
class Data < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the fine-tuning job.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!method initialize(id:)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Webhooks::FineTuningJobFailedWebhookEvent::Data} for more
-
# details.
-
#
-
# Event data payload.
-
#
-
# @param id [String] The unique ID of the fine-tuning job.
-
end
-
-
# The object of the event. Always `event`.
-
#
-
# @see OpenAI::Models::Webhooks::FineTuningJobFailedWebhookEvent#object
-
1
module Object
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
EVENT = :event
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Webhooks
-
1
class FineTuningJobSucceededWebhookEvent < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the event.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute created_at
-
# The Unix timestamp (in seconds) of when the fine-tuning job succeeded.
-
#
-
# @return [Integer]
-
1
required :created_at, Integer
-
-
# @!attribute data
-
# Event data payload.
-
#
-
# @return [OpenAI::Models::Webhooks::FineTuningJobSucceededWebhookEvent::Data]
-
1
required :data, -> { OpenAI::Webhooks::FineTuningJobSucceededWebhookEvent::Data }
-
-
# @!attribute type
-
# The type of the event. Always `fine_tuning.job.succeeded`.
-
#
-
# @return [Symbol, :"fine_tuning.job.succeeded"]
-
1
required :type, const: :"fine_tuning.job.succeeded"
-
-
# @!attribute object
-
# The object of the event. Always `event`.
-
#
-
# @return [Symbol, OpenAI::Models::Webhooks::FineTuningJobSucceededWebhookEvent::Object, nil]
-
1
optional :object, enum: -> { OpenAI::Webhooks::FineTuningJobSucceededWebhookEvent::Object }
-
-
# @!method initialize(id:, created_at:, data:, object: nil, type: :"fine_tuning.job.succeeded")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Webhooks::FineTuningJobSucceededWebhookEvent} for more details.
-
#
-
# Sent when a fine-tuning job has succeeded.
-
#
-
# @param id [String] The unique ID of the event.
-
#
-
# @param created_at [Integer] The Unix timestamp (in seconds) of when the fine-tuning job succeeded.
-
#
-
# @param data [OpenAI::Models::Webhooks::FineTuningJobSucceededWebhookEvent::Data] Event data payload.
-
#
-
# @param object [Symbol, OpenAI::Models::Webhooks::FineTuningJobSucceededWebhookEvent::Object] The object of the event. Always `event`.
-
#
-
# @param type [Symbol, :"fine_tuning.job.succeeded"] The type of the event. Always `fine_tuning.job.succeeded`.
-
-
# @see OpenAI::Models::Webhooks::FineTuningJobSucceededWebhookEvent#data
-
1
class Data < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the fine-tuning job.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!method initialize(id:)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Webhooks::FineTuningJobSucceededWebhookEvent::Data} for more
-
# details.
-
#
-
# Event data payload.
-
#
-
# @param id [String] The unique ID of the fine-tuning job.
-
end
-
-
# The object of the event. Always `event`.
-
#
-
# @see OpenAI::Models::Webhooks::FineTuningJobSucceededWebhookEvent#object
-
1
module Object
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
EVENT = :event
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Webhooks
-
1
class ResponseCancelledWebhookEvent < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the event.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute created_at
-
# The Unix timestamp (in seconds) of when the model response was cancelled.
-
#
-
# @return [Integer]
-
1
required :created_at, Integer
-
-
# @!attribute data
-
# Event data payload.
-
#
-
# @return [OpenAI::Models::Webhooks::ResponseCancelledWebhookEvent::Data]
-
1
required :data, -> { OpenAI::Webhooks::ResponseCancelledWebhookEvent::Data }
-
-
# @!attribute type
-
# The type of the event. Always `response.cancelled`.
-
#
-
# @return [Symbol, :"response.cancelled"]
-
1
required :type, const: :"response.cancelled"
-
-
# @!attribute object
-
# The object of the event. Always `event`.
-
#
-
# @return [Symbol, OpenAI::Models::Webhooks::ResponseCancelledWebhookEvent::Object, nil]
-
1
optional :object, enum: -> { OpenAI::Webhooks::ResponseCancelledWebhookEvent::Object }
-
-
# @!method initialize(id:, created_at:, data:, object: nil, type: :"response.cancelled")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Webhooks::ResponseCancelledWebhookEvent} for more details.
-
#
-
# Sent when a background response has been cancelled.
-
#
-
# @param id [String] The unique ID of the event.
-
#
-
# @param created_at [Integer] The Unix timestamp (in seconds) of when the model response was cancelled.
-
#
-
# @param data [OpenAI::Models::Webhooks::ResponseCancelledWebhookEvent::Data] Event data payload.
-
#
-
# @param object [Symbol, OpenAI::Models::Webhooks::ResponseCancelledWebhookEvent::Object] The object of the event. Always `event`.
-
#
-
# @param type [Symbol, :"response.cancelled"] The type of the event. Always `response.cancelled`.
-
-
# @see OpenAI::Models::Webhooks::ResponseCancelledWebhookEvent#data
-
1
class Data < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the model response.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!method initialize(id:)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Webhooks::ResponseCancelledWebhookEvent::Data} for more
-
# details.
-
#
-
# Event data payload.
-
#
-
# @param id [String] The unique ID of the model response.
-
end
-
-
# The object of the event. Always `event`.
-
#
-
# @see OpenAI::Models::Webhooks::ResponseCancelledWebhookEvent#object
-
1
module Object
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
EVENT = :event
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Webhooks
-
1
class ResponseCompletedWebhookEvent < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the event.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute created_at
-
# The Unix timestamp (in seconds) of when the model response was completed.
-
#
-
# @return [Integer]
-
1
required :created_at, Integer
-
-
# @!attribute data
-
# Event data payload.
-
#
-
# @return [OpenAI::Models::Webhooks::ResponseCompletedWebhookEvent::Data]
-
1
required :data, -> { OpenAI::Webhooks::ResponseCompletedWebhookEvent::Data }
-
-
# @!attribute type
-
# The type of the event. Always `response.completed`.
-
#
-
# @return [Symbol, :"response.completed"]
-
1
required :type, const: :"response.completed"
-
-
# @!attribute object
-
# The object of the event. Always `event`.
-
#
-
# @return [Symbol, OpenAI::Models::Webhooks::ResponseCompletedWebhookEvent::Object, nil]
-
1
optional :object, enum: -> { OpenAI::Webhooks::ResponseCompletedWebhookEvent::Object }
-
-
# @!method initialize(id:, created_at:, data:, object: nil, type: :"response.completed")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Webhooks::ResponseCompletedWebhookEvent} for more details.
-
#
-
# Sent when a background response has been completed.
-
#
-
# @param id [String] The unique ID of the event.
-
#
-
# @param created_at [Integer] The Unix timestamp (in seconds) of when the model response was completed.
-
#
-
# @param data [OpenAI::Models::Webhooks::ResponseCompletedWebhookEvent::Data] Event data payload.
-
#
-
# @param object [Symbol, OpenAI::Models::Webhooks::ResponseCompletedWebhookEvent::Object] The object of the event. Always `event`.
-
#
-
# @param type [Symbol, :"response.completed"] The type of the event. Always `response.completed`.
-
-
# @see OpenAI::Models::Webhooks::ResponseCompletedWebhookEvent#data
-
1
class Data < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the model response.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!method initialize(id:)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Webhooks::ResponseCompletedWebhookEvent::Data} for more
-
# details.
-
#
-
# Event data payload.
-
#
-
# @param id [String] The unique ID of the model response.
-
end
-
-
# The object of the event. Always `event`.
-
#
-
# @see OpenAI::Models::Webhooks::ResponseCompletedWebhookEvent#object
-
1
module Object
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
EVENT = :event
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Webhooks
-
1
class ResponseFailedWebhookEvent < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the event.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute created_at
-
# The Unix timestamp (in seconds) of when the model response failed.
-
#
-
# @return [Integer]
-
1
required :created_at, Integer
-
-
# @!attribute data
-
# Event data payload.
-
#
-
# @return [OpenAI::Models::Webhooks::ResponseFailedWebhookEvent::Data]
-
1
required :data, -> { OpenAI::Webhooks::ResponseFailedWebhookEvent::Data }
-
-
# @!attribute type
-
# The type of the event. Always `response.failed`.
-
#
-
# @return [Symbol, :"response.failed"]
-
1
required :type, const: :"response.failed"
-
-
# @!attribute object
-
# The object of the event. Always `event`.
-
#
-
# @return [Symbol, OpenAI::Models::Webhooks::ResponseFailedWebhookEvent::Object, nil]
-
1
optional :object, enum: -> { OpenAI::Webhooks::ResponseFailedWebhookEvent::Object }
-
-
# @!method initialize(id:, created_at:, data:, object: nil, type: :"response.failed")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Webhooks::ResponseFailedWebhookEvent} for more details.
-
#
-
# Sent when a background response has failed.
-
#
-
# @param id [String] The unique ID of the event.
-
#
-
# @param created_at [Integer] The Unix timestamp (in seconds) of when the model response failed.
-
#
-
# @param data [OpenAI::Models::Webhooks::ResponseFailedWebhookEvent::Data] Event data payload.
-
#
-
# @param object [Symbol, OpenAI::Models::Webhooks::ResponseFailedWebhookEvent::Object] The object of the event. Always `event`.
-
#
-
# @param type [Symbol, :"response.failed"] The type of the event. Always `response.failed`.
-
-
# @see OpenAI::Models::Webhooks::ResponseFailedWebhookEvent#data
-
1
class Data < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the model response.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!method initialize(id:)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Webhooks::ResponseFailedWebhookEvent::Data} for more details.
-
#
-
# Event data payload.
-
#
-
# @param id [String] The unique ID of the model response.
-
end
-
-
# The object of the event. Always `event`.
-
#
-
# @see OpenAI::Models::Webhooks::ResponseFailedWebhookEvent#object
-
1
module Object
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
EVENT = :event
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Webhooks
-
1
class ResponseIncompleteWebhookEvent < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the event.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!attribute created_at
-
# The Unix timestamp (in seconds) of when the model response was interrupted.
-
#
-
# @return [Integer]
-
1
required :created_at, Integer
-
-
# @!attribute data
-
# Event data payload.
-
#
-
# @return [OpenAI::Models::Webhooks::ResponseIncompleteWebhookEvent::Data]
-
1
required :data, -> { OpenAI::Webhooks::ResponseIncompleteWebhookEvent::Data }
-
-
# @!attribute type
-
# The type of the event. Always `response.incomplete`.
-
#
-
# @return [Symbol, :"response.incomplete"]
-
1
required :type, const: :"response.incomplete"
-
-
# @!attribute object
-
# The object of the event. Always `event`.
-
#
-
# @return [Symbol, OpenAI::Models::Webhooks::ResponseIncompleteWebhookEvent::Object, nil]
-
1
optional :object, enum: -> { OpenAI::Webhooks::ResponseIncompleteWebhookEvent::Object }
-
-
# @!method initialize(id:, created_at:, data:, object: nil, type: :"response.incomplete")
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Webhooks::ResponseIncompleteWebhookEvent} for more details.
-
#
-
# Sent when a background response has been interrupted.
-
#
-
# @param id [String] The unique ID of the event.
-
#
-
# @param created_at [Integer] The Unix timestamp (in seconds) of when the model response was interrupted.
-
#
-
# @param data [OpenAI::Models::Webhooks::ResponseIncompleteWebhookEvent::Data] Event data payload.
-
#
-
# @param object [Symbol, OpenAI::Models::Webhooks::ResponseIncompleteWebhookEvent::Object] The object of the event. Always `event`.
-
#
-
# @param type [Symbol, :"response.incomplete"] The type of the event. Always `response.incomplete`.
-
-
# @see OpenAI::Models::Webhooks::ResponseIncompleteWebhookEvent#data
-
1
class Data < OpenAI::Internal::Type::BaseModel
-
# @!attribute id
-
# The unique ID of the model response.
-
#
-
# @return [String]
-
1
required :id, String
-
-
# @!method initialize(id:)
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Webhooks::ResponseIncompleteWebhookEvent::Data} for more
-
# details.
-
#
-
# Event data payload.
-
#
-
# @param id [String] The unique ID of the model response.
-
end
-
-
# The object of the event. Always `event`.
-
#
-
# @see OpenAI::Models::Webhooks::ResponseIncompleteWebhookEvent#object
-
1
module Object
-
1
extend OpenAI::Internal::Type::Enum
-
-
1
EVENT = :event
-
-
# @!method self.values
-
# @return [Array<Symbol>]
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Webhooks
-
# Sent when a batch API request has been cancelled.
-
1
module UnwrapWebhookEvent
-
1
extend OpenAI::Internal::Type::Union
-
-
1
discriminator :type
-
-
# Sent when a batch API request has been cancelled.
-
1
variant :"batch.cancelled", -> { OpenAI::Webhooks::BatchCancelledWebhookEvent }
-
-
# Sent when a batch API request has been completed.
-
1
variant :"batch.completed", -> { OpenAI::Webhooks::BatchCompletedWebhookEvent }
-
-
# Sent when a batch API request has expired.
-
1
variant :"batch.expired", -> { OpenAI::Webhooks::BatchExpiredWebhookEvent }
-
-
# Sent when a batch API request has failed.
-
1
variant :"batch.failed", -> { OpenAI::Webhooks::BatchFailedWebhookEvent }
-
-
# Sent when an eval run has been canceled.
-
1
variant :"eval.run.canceled", -> { OpenAI::Webhooks::EvalRunCanceledWebhookEvent }
-
-
# Sent when an eval run has failed.
-
1
variant :"eval.run.failed", -> { OpenAI::Webhooks::EvalRunFailedWebhookEvent }
-
-
# Sent when an eval run has succeeded.
-
1
variant :"eval.run.succeeded", -> { OpenAI::Webhooks::EvalRunSucceededWebhookEvent }
-
-
# Sent when a fine-tuning job has been cancelled.
-
1
variant :"fine_tuning.job.cancelled", -> { OpenAI::Webhooks::FineTuningJobCancelledWebhookEvent }
-
-
# Sent when a fine-tuning job has failed.
-
1
variant :"fine_tuning.job.failed", -> { OpenAI::Webhooks::FineTuningJobFailedWebhookEvent }
-
-
# Sent when a fine-tuning job has succeeded.
-
1
variant :"fine_tuning.job.succeeded", -> { OpenAI::Webhooks::FineTuningJobSucceededWebhookEvent }
-
-
# Sent when a background response has been cancelled.
-
1
variant :"response.cancelled", -> { OpenAI::Webhooks::ResponseCancelledWebhookEvent }
-
-
# Sent when a background response has been completed.
-
1
variant :"response.completed", -> { OpenAI::Webhooks::ResponseCompletedWebhookEvent }
-
-
# Sent when a background response has failed.
-
1
variant :"response.failed", -> { OpenAI::Webhooks::ResponseFailedWebhookEvent }
-
-
# Sent when a background response has been interrupted.
-
1
variant :"response.incomplete", -> { OpenAI::Webhooks::ResponseIncompleteWebhookEvent }
-
-
# @!method self.variants
-
# @return [Array(OpenAI::Models::Webhooks::BatchCancelledWebhookEvent, OpenAI::Models::Webhooks::BatchCompletedWebhookEvent, OpenAI::Models::Webhooks::BatchExpiredWebhookEvent, OpenAI::Models::Webhooks::BatchFailedWebhookEvent, OpenAI::Models::Webhooks::EvalRunCanceledWebhookEvent, OpenAI::Models::Webhooks::EvalRunFailedWebhookEvent, OpenAI::Models::Webhooks::EvalRunSucceededWebhookEvent, OpenAI::Models::Webhooks::FineTuningJobCancelledWebhookEvent, OpenAI::Models::Webhooks::FineTuningJobFailedWebhookEvent, OpenAI::Models::Webhooks::FineTuningJobSucceededWebhookEvent, OpenAI::Models::Webhooks::ResponseCancelledWebhookEvent, OpenAI::Models::Webhooks::ResponseCompletedWebhookEvent, OpenAI::Models::Webhooks::ResponseFailedWebhookEvent, OpenAI::Models::Webhooks::ResponseIncompleteWebhookEvent)]
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Models
-
1
module Webhooks
-
# @see OpenAI::Resources::Webhooks#unwrap
-
1
class WebhookUnwrapParams < OpenAI::Internal::Type::BaseModel
-
1
extend OpenAI::Internal::Type::RequestParameters::Converter
-
1
include OpenAI::Internal::Type::RequestParameters
-
-
# @!method initialize(request_options: {})
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
# Specify HTTP behaviour to use for a specific request. These options supplement
-
# or override those provided at the client level.
-
#
-
# When making a request, you can pass an actual {RequestOptions} instance, or
-
# simply pass a Hash with symbol keys matching the attributes on this class.
-
1
class RequestOptions < OpenAI::Internal::Type::BaseModel
-
# @api private
-
#
-
# @param opts [OpenAI::RequestOptions, Hash{Symbol=>Object}]
-
#
-
# @raise [ArgumentError]
-
1
def self.validate!(opts)
-
case opts
-
2
in: 2
in OpenAI::RequestOptions | Hash
-
2
opts.to_h.each_key do |k|
-
else: 0
then: 0
unless fields.include?(k)
-
raise ArgumentError.new("Request `opts` keys must be one of #{fields.keys}, got #{k.inspect}")
-
end
-
end
-
else: 0
else
-
raise ArgumentError.new("Request `opts` must be a Hash or RequestOptions, got #{opts.inspect}")
-
end
-
end
-
-
# @!attribute idempotency_key
-
# Idempotency key to send with request and all associated retries. Will only be
-
# sent for write requests.
-
#
-
# @return [String, nil]
-
1
optional :idempotency_key, String
-
-
# @!attribute extra_query
-
# Extra query params to send with the request. These are `.merge`’d into any
-
# `query` given at the client level.
-
#
-
# @return [Hash{String=>Array<String>, String, nil}, nil]
-
1
optional :extra_query, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::ArrayOf[String]]
-
-
# @!attribute extra_headers
-
# Extra headers to send with the request. These are `.merged`’d into any
-
# `extra_headers` given at the client level.
-
#
-
# @return [Hash{String=>String, nil}, nil]
-
1
optional :extra_headers, OpenAI::Internal::Type::HashOf[String, nil?: true]
-
-
# @!attribute extra_body
-
# Extra data to send with the request. These are deep merged into any data
-
# generated as part of the normal request.
-
#
-
# @return [Object, nil]
-
1
optional :extra_body, OpenAI::Internal::Type::HashOf[OpenAI::Internal::Type::Unknown]
-
-
# @!attribute max_retries
-
# Maximum number of retries to attempt after a failed initial request.
-
#
-
# @return [Integer, nil]
-
1
optional :max_retries, Integer
-
-
# @!attribute timeout
-
# Request timeout in seconds.
-
#
-
# @return [Float, nil]
-
1
optional :timeout, Float
-
-
# @!method initialize(values = {})
-
# Returns a new instance of RequestOptions.
-
#
-
# @param values [Hash{Symbol=>Object}]
-
-
1
define_sorbet_constant!(:OrHash) do
-
T.type_alias { T.any(OpenAI::RequestOptions, OpenAI::Internal::AnyHash) }
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Resources
-
1
class Audio
-
# @return [OpenAI::Resources::Audio::Transcriptions]
-
1
attr_reader :transcriptions
-
-
# @return [OpenAI::Resources::Audio::Translations]
-
1
attr_reader :translations
-
-
# @return [OpenAI::Resources::Audio::Speech]
-
1
attr_reader :speech
-
-
# @api private
-
#
-
# @param client [OpenAI::Client]
-
1
def initialize(client:)
-
2
@client = client
-
2
@transcriptions = OpenAI::Resources::Audio::Transcriptions.new(client: client)
-
2
@translations = OpenAI::Resources::Audio::Translations.new(client: client)
-
2
@speech = OpenAI::Resources::Audio::Speech.new(client: client)
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Resources
-
1
class Audio
-
1
class Speech
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Audio::SpeechCreateParams} for more details.
-
#
-
# Generates audio from the input text.
-
#
-
# @overload create(input:, model:, voice:, instructions: nil, response_format: nil, speed: nil, stream_format: nil, request_options: {})
-
#
-
# @param input [String] The text to generate audio for. The maximum length is 4096 characters.
-
#
-
# @param model [String, Symbol, OpenAI::Models::Audio::SpeechModel] One of the available [TTS models](https://platform.openai.com/docs/models#tts):
-
#
-
# @param voice [String, Symbol, OpenAI::Models::Audio::SpeechCreateParams::Voice] The voice to use when generating the audio. Supported voices are `alloy`, `ash`,
-
#
-
# @param instructions [String] Control the voice of your generated audio with additional instructions. Does not
-
#
-
# @param response_format [Symbol, OpenAI::Models::Audio::SpeechCreateParams::ResponseFormat] The format to audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav
-
#
-
# @param speed [Float] The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is
-
#
-
# @param stream_format [Symbol, OpenAI::Models::Audio::SpeechCreateParams::StreamFormat] The format to stream the audio in. Supported formats are `sse` and `audio`. `sse
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
-
#
-
# @return [StringIO]
-
#
-
# @see OpenAI::Models::Audio::SpeechCreateParams
-
1
def create(params)
-
parsed, options = OpenAI::Audio::SpeechCreateParams.dump_request(params)
-
@client.request(
-
method: :post,
-
path: "audio/speech",
-
headers: {"accept" => "application/octet-stream"},
-
body: parsed,
-
model: StringIO,
-
options: options
-
)
-
end
-
-
# @api private
-
#
-
# @param client [OpenAI::Client]
-
1
def initialize(client:)
-
2
@client = client
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Resources
-
1
class Audio
-
1
class Transcriptions
-
# See {OpenAI::Resources::Audio::Transcriptions#create_streaming} for streaming
-
# counterpart.
-
#
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Audio::TranscriptionCreateParams} for more details.
-
#
-
# Transcribes audio into the input language.
-
#
-
# @overload create(file:, model:, chunking_strategy: nil, include: nil, language: nil, prompt: nil, response_format: nil, temperature: nil, timestamp_granularities: nil, request_options: {})
-
#
-
# @param file [Pathname, StringIO, IO, String, OpenAI::FilePart] The audio file object (not file name) to transcribe, in one of these formats: fl
-
#
-
# @param model [String, Symbol, OpenAI::Models::AudioModel] ID of the model to use. The options are `gpt-4o-transcribe`, `gpt-4o-mini-transc
-
#
-
# @param chunking_strategy [Symbol, :auto, OpenAI::Models::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig, nil] Controls how the audio is cut into chunks. When set to `"auto"`, the server firs
-
#
-
# @param include [Array<Symbol, OpenAI::Models::Audio::TranscriptionInclude>] Additional information to include in the transcription response.
-
#
-
# @param language [String] The language of the input audio. Supplying the input language in [ISO-639-1](htt
-
#
-
# @param prompt [String] An optional text to guide the model's style or continue a previous audio segment
-
#
-
# @param response_format [Symbol, OpenAI::Models::AudioResponseFormat] The format of the output, in one of these options: `json`, `text`, `srt`, `verbo
-
#
-
# @param temperature [Float] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
-
#
-
# @param timestamp_granularities [Array<Symbol, OpenAI::Models::Audio::TranscriptionCreateParams::TimestampGranularity>] The timestamp granularities to populate for this transcription. `response_format
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
-
#
-
# @return [OpenAI::Models::Audio::Transcription, OpenAI::Models::Audio::TranscriptionVerbose]
-
#
-
# @see OpenAI::Models::Audio::TranscriptionCreateParams
-
1
def create(params)
-
parsed, options = OpenAI::Audio::TranscriptionCreateParams.dump_request(params)
-
then: 0
else: 0
if parsed[:stream]
-
message = "Please use `#create_streaming` for the streaming use case."
-
raise ArgumentError.new(message)
-
end
-
@client.request(
-
method: :post,
-
path: "audio/transcriptions",
-
headers: {"content-type" => "multipart/form-data"},
-
body: parsed,
-
model: OpenAI::Models::Audio::TranscriptionCreateResponse,
-
options: options
-
)
-
end
-
-
# See {OpenAI::Resources::Audio::Transcriptions#create} for non-streaming
-
# counterpart.
-
#
-
# Some parameter documentation has been truncated, see
# {OpenAI::Models::Audio::TranscriptionCreateParams} for more details.
#
# Transcribes audio into the input language.
#
# @overload create_streaming(file:, model:, chunking_strategy: nil, include: nil, language: nil, prompt: nil, response_format: nil, temperature: nil, timestamp_granularities: nil, request_options: {})
#
# @param file [Pathname, StringIO, IO, String, OpenAI::FilePart] The audio file object (not file name) to transcribe, in one of these formats: fl
#
# @param model [String, Symbol, OpenAI::Models::AudioModel] ID of the model to use. The options are `gpt-4o-transcribe`, `gpt-4o-mini-transc
#
# @param chunking_strategy [Symbol, :auto, OpenAI::Models::Audio::TranscriptionCreateParams::ChunkingStrategy::VadConfig, nil] Controls how the audio is cut into chunks. When set to `"auto"`, the server firs
#
# @param include [Array<Symbol, OpenAI::Models::Audio::TranscriptionInclude>] Additional information to include in the transcription response.
#
# @param language [String] The language of the input audio. Supplying the input language in [ISO-639-1](htt
#
# @param prompt [String] An optional text to guide the model's style or continue a previous audio segment
#
# @param response_format [Symbol, OpenAI::Models::AudioResponseFormat] The format of the output, in one of these options: `json`, `text`, `srt`, `verbo
#
# @param temperature [Float] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
#
# @param timestamp_granularities [Array<Symbol, OpenAI::Models::Audio::TranscriptionCreateParams::TimestampGranularity>] The timestamp granularities to populate for this transcription. `response_format
#
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
#
# @return [OpenAI::Internal::Stream<OpenAI::Models::Audio::TranscriptionTextDeltaEvent, OpenAI::Models::Audio::TranscriptionTextDoneEvent>]
#
# @see OpenAI::Models::Audio::TranscriptionCreateParams
def create_streaming(params)
  body, options = OpenAI::Audio::TranscriptionCreateParams.dump_request(params)
  # An explicit `stream: false` contradicts this streaming variant; direct the
  # caller to the non-streaming method instead of silently overriding.
  if !body.fetch(:stream, true)
    raise ArgumentError, "Please use `#create` for the non-streaming use case."
  end
  # Force the streaming flag on for the wire request.
  body[:stream] = true
  @client.request(
    method: :post,
    path: "audio/transcriptions",
    headers: {"content-type" => "multipart/form-data", "accept" => "text/event-stream"},
    body: body,
    stream: OpenAI::Internal::Stream,
    model: OpenAI::Audio::TranscriptionStreamEvent,
    options: options
  )
end
-
-
# @api private
#
# Constructs the resource; not intended for direct use by SDK consumers.
#
# @param client [OpenAI::Client] low-level client used to issue HTTP requests
def initialize(client:)
  @client = client
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Resources
    class Audio
      # Resource wrapper for the `audio/translations` endpoint.
      class Translations
        # Some parameter documentation has been truncated, see
        # {OpenAI::Models::Audio::TranslationCreateParams} for more details.
        #
        # Translates audio into English.
        #
        # @overload create(file:, model:, prompt: nil, response_format: nil, temperature: nil, request_options: {})
        #
        # @param file [Pathname, StringIO, IO, String, OpenAI::FilePart] The audio file object (not file name) translate, in one of these formats: flac,
        #
        # @param model [String, Symbol, OpenAI::Models::AudioModel] ID of the model to use. Only `whisper-1` (which is powered by our open source Wh
        #
        # @param prompt [String] An optional text to guide the model's style or continue a previous audio segment
        #
        # @param response_format [Symbol, OpenAI::Models::Audio::TranslationCreateParams::ResponseFormat] The format of the output, in one of these options: `json`, `text`, `srt`, `verbo
        #
        # @param temperature [Float] The sampling temperature, between 0 and 1. Higher values like 0.8 will make the
        #
        # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
        #
        # @return [OpenAI::Models::Audio::Translation, OpenAI::Models::Audio::TranslationVerbose]
        #
        # @see OpenAI::Models::Audio::TranslationCreateParams
        def create(params)
          body, options = OpenAI::Audio::TranslationCreateParams.dump_request(params)
          # File uploads require a multipart request body.
          @client.request(
            method: :post,
            path: "audio/translations",
            headers: {"content-type" => "multipart/form-data"},
            body: body,
            model: OpenAI::Models::Audio::TranslationCreateResponse,
            options: options
          )
        end

        # @api private
        #
        # @param client [OpenAI::Client] low-level client used to issue HTTP requests
        def initialize(client:)
          @client = client
        end
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Resources
    # Resource wrapper for the Batch API (`/v1/batches`).
    class Batches
      # Some parameter documentation has been truncated, see
      # {OpenAI::Models::BatchCreateParams} for more details.
      #
      # Creates and executes a batch from an uploaded file of requests
      #
      # @overload create(completion_window:, endpoint:, input_file_id:, metadata: nil, request_options: {})
      #
      # @param completion_window [Symbol, OpenAI::Models::BatchCreateParams::CompletionWindow] The time frame within which the batch should be processed. Currently only `24h`
      #
      # @param endpoint [Symbol, OpenAI::Models::BatchCreateParams::Endpoint] The endpoint to be used for all requests in the batch. Currently `/v1/responses`
      #
      # @param input_file_id [String] The ID of an uploaded file that contains requests for the new batch.
      #
      # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
      #
      # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
      #
      # @return [OpenAI::Models::Batch]
      #
      # @see OpenAI::Models::BatchCreateParams
      def create(params)
        parsed, options = OpenAI::BatchCreateParams.dump_request(params)
        @client.request(method: :post, path: "batches", body: parsed, model: OpenAI::Batch, options: options)
      end

      # Retrieves a batch.
      #
      # @overload retrieve(batch_id, request_options: {})
      #
      # @param batch_id [String] The ID of the batch to retrieve.
      #
      # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
      #
      # @return [OpenAI::Models::Batch]
      #
      # @see OpenAI::Models::BatchRetrieveParams
      def retrieve(batch_id, params = {})
        @client.request(
          method: :get,
          # The ID is interpolated as a path segment.
          path: ["batches/%1$s", batch_id],
          model: OpenAI::Batch,
          options: params[:request_options]
        )
      end

      # Some parameter documentation has been truncated, see
      # {OpenAI::Models::BatchListParams} for more details.
      #
      # List your organization's batches.
      #
      # @overload list(after: nil, limit: nil, request_options: {})
      #
      # @param after [String] A cursor for use in pagination. `after` is an object ID that defines your place
      #
      # @param limit [Integer] A limit on the number of objects to be returned. Limit can range between 1 and 1
      #
      # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
      #
      # @return [OpenAI::Internal::CursorPage<OpenAI::Models::Batch>]
      #
      # @see OpenAI::Models::BatchListParams
      def list(params = {})
        parsed, options = OpenAI::BatchListParams.dump_request(params)
        @client.request(
          method: :get,
          path: "batches",
          query: parsed,
          # Results are wrapped in a cursor-based page of Batch models.
          page: OpenAI::Internal::CursorPage,
          model: OpenAI::Batch,
          options: options
        )
      end

      # Cancels an in-progress batch. The batch will be in status `cancelling` for up to
      # 10 minutes, before changing to `cancelled`, where it will have partial results
      # (if any) available in the output file.
      #
      # @overload cancel(batch_id, request_options: {})
      #
      # @param batch_id [String] The ID of the batch to cancel.
      #
      # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
      #
      # @return [OpenAI::Models::Batch]
      #
      # @see OpenAI::Models::BatchCancelParams
      def cancel(batch_id, params = {})
        @client.request(
          method: :post,
          path: ["batches/%1$s/cancel", batch_id],
          model: OpenAI::Batch,
          options: params[:request_options]
        )
      end

      # @api private
      #
      # @param client [OpenAI::Client]
      def initialize(client:)
        @client = client
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Resources
    # Namespace resource grouping the beta Assistants and Threads APIs.
    class Beta
      # @return [OpenAI::Resources::Beta::Assistants]
      attr_reader :assistants

      # @return [OpenAI::Resources::Beta::Threads]
      attr_reader :threads

      # @api private
      #
      # @param client [OpenAI::Client] shared low-level client, also passed to sub-resources
      def initialize(client:)
        @client = client
        @assistants = OpenAI::Resources::Beta::Assistants.new(client: client)
        @threads = OpenAI::Resources::Beta::Threads.new(client: client)
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Resources
    class Beta
      # Resource wrapper for the beta Assistants API. All requests carry the
      # `OpenAI-Beta: assistants=v2` header.
      class Assistants
        # Some parameter documentation has been truncated, see
        # {OpenAI::Models::Beta::AssistantCreateParams} for more details.
        #
        # Create an assistant with a model and instructions.
        #
        # @overload create(model:, description: nil, instructions: nil, metadata: nil, name: nil, reasoning_effort: nil, response_format: nil, temperature: nil, tool_resources: nil, tools: nil, top_p: nil, request_options: {})
        #
        # @param model [String, Symbol, OpenAI::Models::ChatModel] ID of the model to use. You can use the [List models](https://platform.openai.co
        #
        # @param description [String, nil] The description of the assistant. The maximum length is 512 characters.
        #
        # @param instructions [String, nil] The system instructions that the assistant uses. The maximum length is 256,000 c
        #
        # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
        #
        # @param name [String, nil] The name of the assistant. The maximum length is 256 characters.
        #
        # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] **o-series models only**
        #
        # @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] Specifies the format that the model must output. Compatible with [GPT-4o](https:
        #
        # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
        #
        # @param tool_resources [OpenAI::Models::Beta::AssistantCreateParams::ToolResources, nil] A set of resources that are used by the assistant's tools. The resources are spe
        #
        # @param tools [Array<OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::FileSearchTool, OpenAI::Models::Beta::FunctionTool>] A list of tool enabled on the assistant. There can be a maximum of 128 tools per
        #
        # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling, where the
        #
        # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
        #
        # @return [OpenAI::Models::Beta::Assistant]
        #
        # @see OpenAI::Models::Beta::AssistantCreateParams
        def create(params)
          parsed, options = OpenAI::Beta::AssistantCreateParams.dump_request(params)
          @client.request(
            method: :post,
            path: "assistants",
            body: parsed,
            model: OpenAI::Beta::Assistant,
            # Beta header is merged ahead of caller options so callers may override.
            options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options}
          )
        end

        # Retrieves an assistant.
        #
        # @overload retrieve(assistant_id, request_options: {})
        #
        # @param assistant_id [String] The ID of the assistant to retrieve.
        #
        # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
        #
        # @return [OpenAI::Models::Beta::Assistant]
        #
        # @see OpenAI::Models::Beta::AssistantRetrieveParams
        def retrieve(assistant_id, params = {})
          @client.request(
            method: :get,
            path: ["assistants/%1$s", assistant_id],
            model: OpenAI::Beta::Assistant,
            # `to_h` tolerates a nil :request_options entry.
            options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **params[:request_options].to_h}
          )
        end

        # Some parameter documentation has been truncated, see
        # {OpenAI::Models::Beta::AssistantUpdateParams} for more details.
        #
        # Modifies an assistant.
        #
        # @overload update(assistant_id, description: nil, instructions: nil, metadata: nil, model: nil, name: nil, reasoning_effort: nil, response_format: nil, temperature: nil, tool_resources: nil, tools: nil, top_p: nil, request_options: {})
        #
        # @param assistant_id [String] The ID of the assistant to modify.
        #
        # @param description [String, nil] The description of the assistant. The maximum length is 512 characters.
        #
        # @param instructions [String, nil] The system instructions that the assistant uses. The maximum length is 256,000 c
        #
        # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
        #
        # @param model [String, Symbol, OpenAI::Models::Beta::AssistantUpdateParams::Model] ID of the model to use. You can use the [List models](https://platform.openai.co
        #
        # @param name [String, nil] The name of the assistant. The maximum length is 256 characters.
        #
        # @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] **o-series models only**
        #
        # @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] Specifies the format that the model must output. Compatible with [GPT-4o](https:
        #
        # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
        #
        # @param tool_resources [OpenAI::Models::Beta::AssistantUpdateParams::ToolResources, nil] A set of resources that are used by the assistant's tools. The resources are spe
        #
        # @param tools [Array<OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::FileSearchTool, OpenAI::Models::Beta::FunctionTool>] A list of tool enabled on the assistant. There can be a maximum of 128 tools per
        #
        # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling, where the
        #
        # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
        #
        # @return [OpenAI::Models::Beta::Assistant]
        #
        # @see OpenAI::Models::Beta::AssistantUpdateParams
        def update(assistant_id, params = {})
          parsed, options = OpenAI::Beta::AssistantUpdateParams.dump_request(params)
          @client.request(
            method: :post,
            path: ["assistants/%1$s", assistant_id],
            body: parsed,
            model: OpenAI::Beta::Assistant,
            options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options}
          )
        end

        # Some parameter documentation has been truncated, see
        # {OpenAI::Models::Beta::AssistantListParams} for more details.
        #
        # Returns a list of assistants.
        #
        # @overload list(after: nil, before: nil, limit: nil, order: nil, request_options: {})
        #
        # @param after [String] A cursor for use in pagination. `after` is an object ID that defines your place
        #
        # @param before [String] A cursor for use in pagination. `before` is an object ID that defines your place
        #
        # @param limit [Integer] A limit on the number of objects to be returned. Limit can range between 1 and 1
        #
        # @param order [Symbol, OpenAI::Models::Beta::AssistantListParams::Order] Sort order by the `created_at` timestamp of the objects. `asc` for ascending ord
        #
        # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
        #
        # @return [OpenAI::Internal::CursorPage<OpenAI::Models::Beta::Assistant>]
        #
        # @see OpenAI::Models::Beta::AssistantListParams
        def list(params = {})
          parsed, options = OpenAI::Beta::AssistantListParams.dump_request(params)
          @client.request(
            method: :get,
            path: "assistants",
            query: parsed,
            page: OpenAI::Internal::CursorPage,
            model: OpenAI::Beta::Assistant,
            options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options}
          )
        end

        # Delete an assistant.
        #
        # @overload delete(assistant_id, request_options: {})
        #
        # @param assistant_id [String] The ID of the assistant to delete.
        #
        # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
        #
        # @return [OpenAI::Models::Beta::AssistantDeleted]
        #
        # @see OpenAI::Models::Beta::AssistantDeleteParams
        def delete(assistant_id, params = {})
          @client.request(
            method: :delete,
            path: ["assistants/%1$s", assistant_id],
            model: OpenAI::Beta::AssistantDeleted,
            options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **params[:request_options].to_h}
          )
        end

        # @api private
        #
        # @param client [OpenAI::Client]
        def initialize(client:)
          @client = client
        end
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Resources
    class Beta
      # @deprecated The Assistants API is deprecated in favor of the Responses API
      class Threads
        # @return [OpenAI::Resources::Beta::Threads::Runs]
        attr_reader :runs

        # @return [OpenAI::Resources::Beta::Threads::Messages]
        attr_reader :messages

        # @deprecated The Assistants API is deprecated in favor of the Responses API
        #
        # Some parameter documentation has been truncated, see
        # {OpenAI::Models::Beta::ThreadCreateParams} for more details.
        #
        # Create a thread.
        #
        # @overload create(messages: nil, metadata: nil, tool_resources: nil, request_options: {})
        #
        # @param messages [Array<OpenAI::Models::Beta::ThreadCreateParams::Message>] A list of [messages](https://platform.openai.com/docs/api-reference/messages) to
        #
        # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
        #
        # @param tool_resources [OpenAI::Models::Beta::ThreadCreateParams::ToolResources, nil] A set of resources that are made available to the assistant's tools in this thre
        #
        # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
        #
        # @return [OpenAI::Models::Beta::Thread]
        #
        # @see OpenAI::Models::Beta::ThreadCreateParams
        def create(params = {})
          parsed, options = OpenAI::Beta::ThreadCreateParams.dump_request(params)
          @client.request(
            method: :post,
            path: "threads",
            body: parsed,
            model: OpenAI::Beta::Thread,
            # Beta header is merged ahead of caller options so callers may override.
            options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options}
          )
        end

        # @deprecated The Assistants API is deprecated in favor of the Responses API
        #
        # Retrieves a thread.
        #
        # @overload retrieve(thread_id, request_options: {})
        #
        # @param thread_id [String] The ID of the thread to retrieve.
        #
        # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
        #
        # @return [OpenAI::Models::Beta::Thread]
        #
        # @see OpenAI::Models::Beta::ThreadRetrieveParams
        def retrieve(thread_id, params = {})
          @client.request(
            method: :get,
            path: ["threads/%1$s", thread_id],
            model: OpenAI::Beta::Thread,
            options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **params[:request_options].to_h}
          )
        end

        # @deprecated The Assistants API is deprecated in favor of the Responses API
        #
        # Some parameter documentation has been truncated, see
        # {OpenAI::Models::Beta::ThreadUpdateParams} for more details.
        #
        # Modifies a thread.
        #
        # @overload update(thread_id, metadata: nil, tool_resources: nil, request_options: {})
        #
        # @param thread_id [String] The ID of the thread to modify. Only the `metadata` can be modified.
        #
        # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
        #
        # @param tool_resources [OpenAI::Models::Beta::ThreadUpdateParams::ToolResources, nil] A set of resources that are made available to the assistant's tools in this thre
        #
        # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
        #
        # @return [OpenAI::Models::Beta::Thread]
        #
        # @see OpenAI::Models::Beta::ThreadUpdateParams
        def update(thread_id, params = {})
          parsed, options = OpenAI::Beta::ThreadUpdateParams.dump_request(params)
          @client.request(
            method: :post,
            path: ["threads/%1$s", thread_id],
            body: parsed,
            model: OpenAI::Beta::Thread,
            options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options}
          )
        end

        # @deprecated The Assistants API is deprecated in favor of the Responses API
        #
        # Delete a thread.
        #
        # @overload delete(thread_id, request_options: {})
        #
        # @param thread_id [String] The ID of the thread to delete.
        #
        # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
        #
        # @return [OpenAI::Models::Beta::ThreadDeleted]
        #
        # @see OpenAI::Models::Beta::ThreadDeleteParams
        def delete(thread_id, params = {})
          @client.request(
            method: :delete,
            path: ["threads/%1$s", thread_id],
            model: OpenAI::Beta::ThreadDeleted,
            options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **params[:request_options].to_h}
          )
        end

        # @deprecated The Assistants API is deprecated in favor of the Responses API
        #
        # See {OpenAI::Resources::Beta::Threads#stream_raw} for streaming counterpart.
        #
        # Some parameter documentation has been truncated, see
        # {OpenAI::Models::Beta::ThreadCreateAndRunParams} for more details.
        #
        # Create a thread and run it in one request.
        #
        # @overload create_and_run(assistant_id:, instructions: nil, max_completion_tokens: nil, max_prompt_tokens: nil, metadata: nil, model: nil, parallel_tool_calls: nil, response_format: nil, temperature: nil, thread: nil, tool_choice: nil, tool_resources: nil, tools: nil, top_p: nil, truncation_strategy: nil, request_options: {})
        #
        # @param assistant_id [String] The ID of the [assistant](https://platform.openai.com/docs/api-reference/assista
        #
        # @param instructions [String, nil] Override the default system message of the assistant. This is useful for modifyi
        #
        # @param max_completion_tokens [Integer, nil] The maximum number of completion tokens that may be used over the course of the
        #
        # @param max_prompt_tokens [Integer, nil] The maximum number of prompt tokens that may be used over the course of the run.
        #
        # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
        #
        # @param model [String, Symbol, OpenAI::Models::ChatModel, nil] The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
        #
        # @param parallel_tool_calls [Boolean] Whether to enable [parallel function calling](https://platform.openai.com/docs/g
        #
        # @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] Specifies the format that the model must output. Compatible with [GPT-4o](https:
        #
        # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
        #
        # @param thread [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread] Options to create a new thread. If no thread is provided when running a
        #
        # @param tool_choice [Symbol, OpenAI::Models::Beta::AssistantToolChoiceOption::Auto, OpenAI::Models::Beta::AssistantToolChoice, nil] Controls which (if any) tool is called by the model.
        #
        # @param tool_resources [OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources, nil] A set of resources that are used by the assistant's tools. The resources are spe
        #
        # @param tools [Array<OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::FileSearchTool, OpenAI::Models::Beta::FunctionTool>, nil] Override the tools the assistant can use for this run. This is useful for modify
        #
        # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling, where the
        #
        # @param truncation_strategy [OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy, nil] Controls for how a thread will be truncated prior to the run. Use this to contro
        #
        # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
        #
        # @return [OpenAI::Models::Beta::Threads::Run]
        #
        # @see OpenAI::Models::Beta::ThreadCreateAndRunParams
        def create_and_run(params)
          parsed, options = OpenAI::Beta::ThreadCreateAndRunParams.dump_request(params)
          # A truthy `stream` flag belongs to the streaming variant of this call.
          if parsed[:stream]
            message = "Please use `#stream_raw` for the streaming use case."
            raise ArgumentError.new(message)
          end
          @client.request(
            method: :post,
            path: "threads/runs",
            body: parsed,
            model: OpenAI::Beta::Threads::Run,
            options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options}
          )
        end

        # Placeholder for a future high-level streaming helper.
        def stream
          raise NotImplementedError.new("higher level helpers are coming soon!")
        end

        # @deprecated The Assistants API is deprecated in favor of the Responses API
        #
        # See {OpenAI::Resources::Beta::Threads#create_and_run} for non-streaming
        # counterpart.
        #
        # Some parameter documentation has been truncated, see
        # {OpenAI::Models::Beta::ThreadCreateAndRunParams} for more details.
        #
        # Create a thread and run it in one request.
        #
        # @overload stream_raw(assistant_id:, instructions: nil, max_completion_tokens: nil, max_prompt_tokens: nil, metadata: nil, model: nil, parallel_tool_calls: nil, response_format: nil, temperature: nil, thread: nil, tool_choice: nil, tool_resources: nil, tools: nil, top_p: nil, truncation_strategy: nil, request_options: {})
        #
        # @param assistant_id [String] The ID of the [assistant](https://platform.openai.com/docs/api-reference/assista
        #
        # @param instructions [String, nil] Override the default system message of the assistant. This is useful for modifyi
        #
        # @param max_completion_tokens [Integer, nil] The maximum number of completion tokens that may be used over the course of the
        #
        # @param max_prompt_tokens [Integer, nil] The maximum number of prompt tokens that may be used over the course of the run.
        #
        # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
        #
        # @param model [String, Symbol, OpenAI::Models::ChatModel, nil] The ID of the [Model](https://platform.openai.com/docs/api-reference/models) to
        #
        # @param parallel_tool_calls [Boolean] Whether to enable [parallel function calling](https://platform.openai.com/docs/g
        #
        # @param response_format [Symbol, :auto, OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONObject, OpenAI::Models::ResponseFormatJSONSchema, nil] Specifies the format that the model must output. Compatible with [GPT-4o](https:
        #
        # @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
        #
        # @param thread [OpenAI::Models::Beta::ThreadCreateAndRunParams::Thread] Options to create a new thread. If no thread is provided when running a
        #
        # @param tool_choice [Symbol, OpenAI::Models::Beta::AssistantToolChoiceOption::Auto, OpenAI::Models::Beta::AssistantToolChoice, nil] Controls which (if any) tool is called by the model.
        #
        # @param tool_resources [OpenAI::Models::Beta::ThreadCreateAndRunParams::ToolResources, nil] A set of resources that are used by the assistant's tools. The resources are spe
        #
        # @param tools [Array<OpenAI::Models::Beta::CodeInterpreterTool, OpenAI::Models::Beta::FileSearchTool, OpenAI::Models::Beta::FunctionTool>, nil] Override the tools the assistant can use for this run. This is useful for modify
        #
        # @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling, where the
        #
        # @param truncation_strategy [OpenAI::Models::Beta::ThreadCreateAndRunParams::TruncationStrategy, nil] Controls for how a thread will be truncated prior to the run. Use this to contro
        #
        # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
        #
        # @return [OpenAI::Internal::Stream<OpenAI::Models::Beta::AssistantStreamEvent::ThreadCreated, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCreated, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunQueued, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunInProgress, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunRequiresAction, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCompleted, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunIncomplete, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunFailed, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCancelling, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunCancelled, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunExpired, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCreated, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepInProgress, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepDelta, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCompleted, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepFailed, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepCancelled, OpenAI::Models::Beta::AssistantStreamEvent::ThreadRunStepExpired, OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageCreated, OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageInProgress, OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageDelta, OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageCompleted, OpenAI::Models::Beta::AssistantStreamEvent::ThreadMessageIncomplete, OpenAI::Models::Beta::AssistantStreamEvent::ErrorEvent>]
        #
        # @see OpenAI::Models::Beta::ThreadCreateAndRunParams
        def stream_raw(params)
          parsed, options = OpenAI::Beta::ThreadCreateAndRunParams.dump_request(params)
          # An explicit `stream: false` contradicts this streaming variant.
          unless parsed.fetch(:stream, true)
            message = "Please use `#create_and_run` for the non-streaming use case."
            raise ArgumentError.new(message)
          end
          # Force the streaming flag on for the wire request.
          parsed.store(:stream, true)
          @client.request(
            method: :post,
            path: "threads/runs",
            headers: {"accept" => "text/event-stream"},
            body: parsed,
            stream: OpenAI::Internal::Stream,
            model: OpenAI::Beta::AssistantStreamEvent,
            options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options}
          )
        end

        # @api private
        #
        # @param client [OpenAI::Client] shared low-level client, also passed to sub-resources
        def initialize(client:)
          @client = client
          @runs = OpenAI::Resources::Beta::Threads::Runs.new(client: client)
          @messages = OpenAI::Resources::Beta::Threads::Messages.new(client: client)
        end
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Resources
-
1
class Beta
-
1
class Threads
-
# @deprecated The Assistants API is deprecated in favor of the Responses API
-
1
class Messages
-
# @deprecated The Assistants API is deprecated in favor of the Responses API
#
# Some parameter documentation has been truncated, see
# {OpenAI::Models::Beta::Threads::MessageCreateParams} for more details.
#
# Create a message.
#
# @overload create(thread_id, content:, role:, attachments: nil, metadata: nil, request_options: {})
#
# @param thread_id [String] The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) t
#
# @param content [String, Array<OpenAI::Models::Beta::Threads::ImageFileContentBlock, OpenAI::Models::Beta::Threads::ImageURLContentBlock, OpenAI::Models::Beta::Threads::TextContentBlockParam>] The text contents of the message.
#
# @param role [Symbol, OpenAI::Models::Beta::Threads::MessageCreateParams::Role] The role of the entity that is creating the message. Allowed values include:
#
# @param attachments [Array<OpenAI::Models::Beta::Threads::MessageCreateParams::Attachment>, nil] A list of files attached to the message, and the tools they should be added to.
#
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
#
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
#
# @see OpenAI::Models::Beta::Threads::MessageCreateParams
#
# @return [OpenAI::Models::Beta::Threads::Message]
def create(thread_id, params)
  parsed, options = OpenAI::Beta::Threads::MessageCreateParams.dump_request(params)
  @client.request(
    method: :post,
    path: ["threads/%1$s/messages", thread_id],
    body: parsed,
    model: OpenAI::Beta::Threads::Message,
    # Beta header is merged ahead of caller options so callers may override.
    options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options}
  )
end
-
-
# @deprecated The Assistants API is deprecated in favor of the Responses API
#
# Some parameter documentation has been truncated, see
# {OpenAI::Models::Beta::Threads::MessageRetrieveParams} for more details.
#
# Retrieve a message.
#
# @overload retrieve(message_id, thread_id:, request_options: {})
#
# @param message_id [String] The ID of the message to retrieve.
#
# @param thread_id [String] The ID of the [thread](https://platform.openai.com/docs/api-reference/threads) t
#
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
#
# @return [OpenAI::Models::Beta::Threads::Message]
#
# @see OpenAI::Models::Beta::Threads::MessageRetrieveParams
def retrieve(message_id, params)
  parsed, options = OpenAI::Beta::Threads::MessageRetrieveParams.dump_request(params)
  # `thread_id` is a path segment, not a query/body field: pull it out of the
  # parsed params; Hash#delete yields the missing key (`_1`) when absent.
  thread_id =
    parsed.delete(:thread_id) do
      raise ArgumentError.new("missing required path argument #{_1}")
    end
  @client.request(
    method: :get,
    path: ["threads/%1$s/messages/%2$s", thread_id, message_id],
    model: OpenAI::Beta::Threads::Message,
    options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options}
  )
end
-
-
# @deprecated The Assistants API is deprecated in favor of the Responses API
#
# Some parameter documentation has been truncated, see
# {OpenAI::Models::Beta::Threads::MessageUpdateParams} for more details.
#
# Modifies a message.
#
# @overload update(message_id, thread_id:, metadata: nil, request_options: {})
#
# @param message_id [String] Path param: The ID of the message to modify.
#
# @param thread_id [String] Path param: The ID of the thread to which this message belongs.
#
# @param metadata [Hash{Symbol=>String}, nil] Body param: Set of 16 key-value pairs that can be attached to an object. This ca
#
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
#
# @return [OpenAI::Models::Beta::Threads::Message]
#
# @see OpenAI::Models::Beta::Threads::MessageUpdateParams
def update(message_id, params)
  parsed, options = OpenAI::Beta::Threads::MessageUpdateParams.dump_request(params)
  # `thread_id` is a path segment, not a body field: pull it out of the parsed
  # params; Hash#delete yields the missing key (`_1`) when absent.
  thread_id =
    parsed.delete(:thread_id) do
      raise ArgumentError.new("missing required path argument #{_1}")
    end
  @client.request(
    method: :post,
    path: ["threads/%1$s/messages/%2$s", thread_id, message_id],
    body: parsed,
    model: OpenAI::Beta::Threads::Message,
    options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options}
  )
end
-
-
# @deprecated The Assistants API is deprecated in favor of the Responses API
#
# Returns a cursor-paginated list of messages for a given thread.
#
# @param thread_id [String] the ID of the thread whose messages are listed
# @param params [Hash] optional filters: `:after`, `:before`, `:limit`,
#   `:order`, `:run_id`, plus `:request_options`
#
# @return [OpenAI::Internal::CursorPage<OpenAI::Models::Beta::Threads::Message>]
#
# @see OpenAI::Models::Beta::Threads::MessageListParams
def list(thread_id, params = {})
  parsed, options = OpenAI::Beta::Threads::MessageListParams.dump_request(params)
  @client.request(
    method: :get,
    path: ["threads/%1$s/messages", thread_id],
    query: parsed,
    page: OpenAI::Internal::CursorPage,
    model: OpenAI::Beta::Threads::Message,
    options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options}
  )
end
-
-
# @deprecated The Assistants API is deprecated in favor of the Responses API
#
# Deletes a message from a thread.
#
# @param message_id [String] the ID of the message to delete
# @param params [Hash] must include the `:thread_id` path argument; may
#   also carry `:request_options`
#
# @return [OpenAI::Models::Beta::Threads::MessageDeleted]
#
# @see OpenAI::Models::Beta::Threads::MessageDeleteParams
def delete(message_id, params)
  parsed, options = OpenAI::Beta::Threads::MessageDeleteParams.dump_request(params)
  # Fail loudly if the required path argument was omitted by the caller.
  thread_id =
    parsed.delete(:thread_id) do |key|
      raise ArgumentError, "missing required path argument #{key}"
    end
  endpoint = ["threads/%1$s/messages/%2$s", thread_id, message_id]
  @client.request(
    method: :delete,
    path: endpoint,
    model: OpenAI::Beta::Threads::MessageDeleted,
    options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options}
  )
end
-
-
# @api private
#
# Builds the resource around the configured API client.
#
# @param client [OpenAI::Client]
def initialize(client:)
  @client = client
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Resources
-
1
class Beta
-
1
class Threads
-
# @deprecated The Assistants API is deprecated in favor of the Responses API
-
1
class Runs
-
# @return [OpenAI::Resources::Beta::Threads::Runs::Steps]
-
1
attr_reader :steps
-
-
# @deprecated The Assistants API is deprecated in favor of the Responses API
#
# See {OpenAI::Resources::Beta::Threads::Runs#create_stream_raw} for the
# streaming counterpart.
#
# Creates a run on a thread (non-streaming).
#
# @param thread_id [String] path param: the ID of the thread to run
# @param params [Hash] must include `:assistant_id`; `:include` is sent as a
#   query parameter, every other key (e.g. `:instructions`, `:metadata`,
#   `:model`, `:tools`, `:temperature`, `:truncation_strategy`) is sent in
#   the request body; may also carry `:request_options`
#
# @raise [ArgumentError] when `:stream` is truthy — streaming must go
#   through `#create_stream_raw`
#
# @return [OpenAI::Models::Beta::Threads::Run]
#
# @see OpenAI::Models::Beta::Threads::RunCreateParams
def create(thread_id, params)
  parsed, options = OpenAI::Beta::Threads::RunCreateParams.dump_request(params)
  if parsed[:stream]
    raise ArgumentError, "Please use `#create_stream_raw` for the streaming use case."
  end
  # `:include` is the only key that travels in the query string; the rest
  # form the JSON body.
  query_keys = [:include]
  @client.request(
    method: :post,
    path: ["threads/%1$s/runs", thread_id],
    query: parsed.slice(*query_keys),
    body: parsed.except(*query_keys),
    model: OpenAI::Beta::Threads::Run,
    options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options}
  )
end
-
-
# @deprecated The Assistants API is deprecated in favor of the Responses API
#
# See {OpenAI::Resources::Beta::Threads::Runs#create} for the non-streaming
# counterpart.
#
# Creates a run on a thread and streams the resulting assistant events as
# server-sent events.
#
# @param thread_id [String] path param: the ID of the thread to run
# @param params [Hash] must include `:assistant_id`; `:include` is sent as a
#   query parameter, all remaining keys are sent in the request body; may
#   also carry `:request_options`
#
# @raise [ArgumentError] when `:stream` is explicitly falsy — non-streaming
#   calls must go through `#create`
#
# @return [OpenAI::Internal::Stream] a stream of
#   OpenAI::Models::Beta::AssistantStreamEvent variants (thread/run/step/
#   message lifecycle events and ErrorEvent)
#
# @see OpenAI::Models::Beta::Threads::RunCreateParams
def create_stream_raw(thread_id, params)
  parsed, options = OpenAI::Beta::Threads::RunCreateParams.dump_request(params)
  unless parsed.fetch(:stream, true)
    raise ArgumentError, "Please use `#create` for the non-streaming use case."
  end
  # Force streaming on so the server emits SSE regardless of caller input.
  parsed[:stream] = true
  query_keys = [:include]
  @client.request(
    method: :post,
    path: ["threads/%1$s/runs", thread_id],
    query: parsed.slice(*query_keys),
    headers: {"accept" => "text/event-stream"},
    body: parsed.except(*query_keys),
    stream: OpenAI::Internal::Stream,
    model: OpenAI::Beta::AssistantStreamEvent,
    options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options}
  )
end
-
-
# @deprecated The Assistants API is deprecated in favor of the Responses API
#
# Retrieves a run.
#
# @param run_id [String] the ID of the run to retrieve
# @param params [Hash] must include the `:thread_id` path argument; may
#   also carry `:request_options`
#
# @return [OpenAI::Models::Beta::Threads::Run]
#
# @see OpenAI::Models::Beta::Threads::RunRetrieveParams
def retrieve(run_id, params)
  parsed, options = OpenAI::Beta::Threads::RunRetrieveParams.dump_request(params)
  # The thread ID is interpolated into the path rather than sent as a param.
  thread_id =
    parsed.delete(:thread_id) do |key|
      raise ArgumentError, "missing required path argument #{key}"
    end
  endpoint = ["threads/%1$s/runs/%2$s", thread_id, run_id]
  @client.request(
    method: :get,
    path: endpoint,
    model: OpenAI::Beta::Threads::Run,
    options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options}
  )
end
-
-
# @deprecated The Assistants API is deprecated in favor of the Responses API
#
# Modifies a run's metadata.
#
# @param run_id [String] the ID of the run to modify
# @param params [Hash] must include the `:thread_id` path argument; may
#   also carry `:metadata` and `:request_options`
#
# @return [OpenAI::Models::Beta::Threads::Run]
#
# @see OpenAI::Models::Beta::Threads::RunUpdateParams
def update(run_id, params)
  parsed, options = OpenAI::Beta::Threads::RunUpdateParams.dump_request(params)
  thread_id =
    parsed.delete(:thread_id) do |key|
      raise ArgumentError, "missing required path argument #{key}"
    end
  endpoint = ["threads/%1$s/runs/%2$s", thread_id, run_id]
  @client.request(
    method: :post,
    path: endpoint,
    body: parsed,
    model: OpenAI::Beta::Threads::Run,
    options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options}
  )
end
-
-
# @deprecated The Assistants API is deprecated in favor of the Responses API
#
# Returns a cursor-paginated list of runs belonging to a thread.
#
# @param thread_id [String] the ID of the thread the runs belong to
# @param params [Hash] optional filters: `:after`, `:before`, `:limit`,
#   `:order`, plus `:request_options`
#
# @return [OpenAI::Internal::CursorPage<OpenAI::Models::Beta::Threads::Run>]
#
# @see OpenAI::Models::Beta::Threads::RunListParams
def list(thread_id, params = {})
  parsed, options = OpenAI::Beta::Threads::RunListParams.dump_request(params)
  @client.request(
    method: :get,
    path: ["threads/%1$s/runs", thread_id],
    query: parsed,
    page: OpenAI::Internal::CursorPage,
    model: OpenAI::Beta::Threads::Run,
    options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options}
  )
end
-
-
# @deprecated The Assistants API is deprecated in favor of the Responses API
#
# Cancels a run that is `in_progress`.
#
# @param run_id [String] the ID of the run to cancel
# @param params [Hash] must include the `:thread_id` path argument; may
#   also carry `:request_options`
#
# @return [OpenAI::Models::Beta::Threads::Run]
#
# @see OpenAI::Models::Beta::Threads::RunCancelParams
def cancel(run_id, params)
  parsed, options = OpenAI::Beta::Threads::RunCancelParams.dump_request(params)
  thread_id =
    parsed.delete(:thread_id) do |key|
      raise ArgumentError, "missing required path argument #{key}"
    end
  endpoint = ["threads/%1$s/runs/%2$s/cancel", thread_id, run_id]
  @client.request(
    method: :post,
    path: endpoint,
    model: OpenAI::Beta::Threads::Run,
    options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options}
  )
end
-
-
# @deprecated The Assistants API is deprecated in favor of the Responses API
#
# See {OpenAI::Resources::Beta::Threads::Runs#submit_tool_outputs_stream_raw}
# for the streaming counterpart.
#
# Submits tool outputs for a run whose status is `requires_action` with
# `required_action.type == "submit_tool_outputs"`. All outputs must be
# submitted in a single request.
#
# @param run_id [String] the ID of the run awaiting tool outputs
# @param params [Hash] must include the `:thread_id` path argument and
#   `:tool_outputs`; may also carry `:request_options`
#
# @raise [ArgumentError] when `:stream` is truthy — streaming must go
#   through `#submit_tool_outputs_stream_raw`
#
# @return [OpenAI::Models::Beta::Threads::Run]
#
# @see OpenAI::Models::Beta::Threads::RunSubmitToolOutputsParams
def submit_tool_outputs(run_id, params)
  parsed, options = OpenAI::Beta::Threads::RunSubmitToolOutputsParams.dump_request(params)
  if parsed[:stream]
    raise ArgumentError, "Please use `#submit_tool_outputs_stream_raw` for the streaming use case."
  end
  thread_id =
    parsed.delete(:thread_id) do |key|
      raise ArgumentError, "missing required path argument #{key}"
    end
  endpoint = ["threads/%1$s/runs/%2$s/submit_tool_outputs", thread_id, run_id]
  @client.request(
    method: :post,
    path: endpoint,
    body: parsed,
    model: OpenAI::Beta::Threads::Run,
    options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options}
  )
end
-
-
# @deprecated The Assistants API is deprecated in favor of the Responses API
#
# See {OpenAI::Resources::Beta::Threads::Runs#submit_tool_outputs} for the
# non-streaming counterpart.
#
# Submits tool outputs for a run whose status is `requires_action` and
# streams the resulting assistant events as server-sent events. All outputs
# must be submitted in a single request.
#
# @param run_id [String] the ID of the run awaiting tool outputs
# @param params [Hash] must include the `:thread_id` path argument and
#   `:tool_outputs`; may also carry `:request_options`
#
# @raise [ArgumentError] when `:stream` is explicitly falsy — non-streaming
#   calls must go through `#submit_tool_outputs`
#
# @return [OpenAI::Internal::Stream] a stream of
#   OpenAI::Models::Beta::AssistantStreamEvent variants
#
# @see OpenAI::Models::Beta::Threads::RunSubmitToolOutputsParams
def submit_tool_outputs_stream_raw(run_id, params)
  parsed, options = OpenAI::Beta::Threads::RunSubmitToolOutputsParams.dump_request(params)
  unless parsed.fetch(:stream, true)
    raise ArgumentError, "Please use `#submit_tool_outputs` for the non-streaming use case."
  end
  # Force streaming on so the server emits SSE regardless of caller input.
  parsed[:stream] = true
  thread_id =
    parsed.delete(:thread_id) do |key|
      raise ArgumentError, "missing required path argument #{key}"
    end
  endpoint = ["threads/%1$s/runs/%2$s/submit_tool_outputs", thread_id, run_id]
  @client.request(
    method: :post,
    path: endpoint,
    headers: {"accept" => "text/event-stream"},
    body: parsed,
    stream: OpenAI::Internal::Stream,
    model: OpenAI::Beta::AssistantStreamEvent,
    options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options}
  )
end
-
-
# @api private
#
# Builds the runs resource and its nested steps sub-resource, sharing the
# same API client.
#
# @param client [OpenAI::Client]
def initialize(client:)
  @client = client
  @steps = OpenAI::Resources::Beta::Threads::Runs::Steps.new(client: client)
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Resources
-
1
class Beta
-
1
class Threads
-
1
class Runs
-
# @deprecated The Assistants API is deprecated in favor of the Responses API
-
1
class Steps
-
# @deprecated The Assistants API is deprecated in favor of the Responses API
#
# Retrieves a run step.
#
# @param step_id [String] the ID of the run step to retrieve
# @param params [Hash] must include the `:thread_id` and `:run_id` path
#   arguments; may also carry `:include` (query) and `:request_options`
#
# @return [OpenAI::Models::Beta::Threads::Runs::RunStep]
#
# @see OpenAI::Models::Beta::Threads::Runs::StepRetrieveParams
def retrieve(step_id, params)
  parsed, options = OpenAI::Beta::Threads::Runs::StepRetrieveParams.dump_request(params)
  # Both IDs are path segments; anything left in +parsed+ goes in the query.
  thread_id =
    parsed.delete(:thread_id) do |key|
      raise ArgumentError, "missing required path argument #{key}"
    end
  run_id =
    parsed.delete(:run_id) do |key|
      raise ArgumentError, "missing required path argument #{key}"
    end
  endpoint = ["threads/%1$s/runs/%2$s/steps/%3$s", thread_id, run_id, step_id]
  @client.request(
    method: :get,
    path: endpoint,
    query: parsed,
    model: OpenAI::Beta::Threads::Runs::RunStep,
    options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options}
  )
end
-
-
# @deprecated The Assistants API is deprecated in favor of the Responses API
#
# Returns a cursor-paginated list of run steps belonging to a run.
#
# @param run_id [String] the ID of the run the steps belong to
# @param params [Hash] must include the `:thread_id` path argument; optional
#   query filters: `:after`, `:before`, `:include`, `:limit`, `:order`, plus
#   `:request_options`
#
# @return [OpenAI::Internal::CursorPage<OpenAI::Models::Beta::Threads::Runs::RunStep>]
#
# @see OpenAI::Models::Beta::Threads::Runs::StepListParams
def list(run_id, params)
  parsed, options = OpenAI::Beta::Threads::Runs::StepListParams.dump_request(params)
  thread_id =
    parsed.delete(:thread_id) do |key|
      raise ArgumentError, "missing required path argument #{key}"
    end
  endpoint = ["threads/%1$s/runs/%2$s/steps", thread_id, run_id]
  @client.request(
    method: :get,
    path: endpoint,
    query: parsed,
    page: OpenAI::Internal::CursorPage,
    model: OpenAI::Beta::Threads::Runs::RunStep,
    options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options}
  )
end
-
-
# @api private
#
# Builds the steps resource around the configured API client.
#
# @param client [OpenAI::Client]
def initialize(client:)
  @client = client
end
-
end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Resources
-
1
class Chat
-
# @return [OpenAI::Resources::Chat::Completions]
-
1
attr_reader :completions
-
-
# @api private
#
# Builds the chat resource and its completions sub-resource, sharing the
# same API client.
#
# @param client [OpenAI::Client]
def initialize(client:)
  @client = client
  @completions = OpenAI::Resources::Chat::Completions.new(client: client)
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Resources
-
1
class Chat
-
1
class Completions
-
# @return [OpenAI::Resources::Chat::Completions::Messages]
-
1
attr_reader :messages
-
-
# See {OpenAI::Resources::Chat::Completions#stream_raw} for streaming counterpart.
-
#
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Chat::CompletionCreateParams} for more details.
-
#
-
# **Starting a new project?** We recommend trying
-
# [Responses](https://platform.openai.com/docs/api-reference/responses) to take
-
# advantage of the latest OpenAI platform features. Compare
-
# [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses).
-
#
-
# ---
-
#
-
# Creates a model response for the given chat conversation. Learn more in the
-
# [text generation](https://platform.openai.com/docs/guides/text-generation),
-
# [vision](https://platform.openai.com/docs/guides/vision), and
-
# [audio](https://platform.openai.com/docs/guides/audio) guides.
-
#
-
# Parameter support can differ depending on the model used to generate the
-
# response, particularly for newer reasoning models. Parameters that are only
-
# supported for reasoning models are noted below. For the current state of
-
# unsupported parameters in reasoning models,
-
# [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).
-
#
-
# @overload create(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, web_search_options: nil, request_options: {})
-
#
-
# @param messages [Array<OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam, OpenAI::Models::Chat::ChatCompletionSystemMessageParam, OpenAI::Models::Chat::ChatCompletionUserMessageParam, OpenAI::Models::Chat::ChatCompletionAssistantMessageParam, OpenAI::Models::Chat::ChatCompletionToolMessageParam, OpenAI::Models::Chat::ChatCompletionFunctionMessageParam>] A list of messages comprising the conversation so far. Depending on the
-
#
-
# @param model [String, Symbol, OpenAI::Models::ChatModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
-
#
-
# @param audio [OpenAI::Models::Chat::ChatCompletionAudioParam, nil] Parameters for audio output. Required when audio output is requested with
-
#
-
# @param frequency_penalty [Float, nil] Number between -2.0 and 2.0. Positive values penalize new tokens based on
-
#
-
# @param function_call [Symbol, OpenAI::Models::Chat::CompletionCreateParams::FunctionCall::FunctionCallMode, OpenAI::Models::Chat::ChatCompletionFunctionCallOption] Deprecated in favor of `tool_choice`.
-
#
-
# @param functions [Array<OpenAI::Models::Chat::CompletionCreateParams::Function>] Deprecated in favor of `tools`.
-
#
-
# @param logit_bias [Hash{Symbol=>Integer}, nil] Modify the likelihood of specified tokens appearing in the completion.
-
#
-
# @param logprobs [Boolean, nil] Whether to return log probabilities of the output tokens or not. If true,
-
#
-
# @param max_completion_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a completion,
-
#
-
# @param max_tokens [Integer, nil] The maximum number of [tokens](/tokenizer) that can be generated in the
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
#
-
# @param modalities [Array<Symbol, OpenAI::Models::Chat::CompletionCreateParams::Modality>, nil] Output types that you would like the model to generate.
-
#
-
# @param n [Integer, nil] How many chat completion choices to generate for each input message. Note that y
-
#
-
# @param parallel_tool_calls [Boolean] Whether to enable [parallel function calling](https://platform.openai.com/docs/g
-
#
-
# @param prediction [OpenAI::Models::Chat::ChatCompletionPredictionContent, nil] Static predicted output content, such as the content of a text file that is
-
#
-
# @param presence_penalty [Float, nil] Number between -2.0 and 2.0. Positive values penalize new tokens based on
-
#
-
# @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi
-
#
-
# @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] **o-series models only**
-
#
-
# @param response_format [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output.
-
#
-
# @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi
-
#
-
# @param seed [Integer, nil] This feature is in Beta.
-
#
-
# @param service_tier [Symbol, OpenAI::Models::Chat::CompletionCreateParams::ServiceTier, nil] Specifies the processing type used for serving the request.
-
#
-
# @param stop [String, Array<String>, nil] Not supported with latest reasoning models `o3` and `o4-mini`.
-
#
-
# @param store [Boolean, nil] Whether or not to store the output of this chat completion request for
-
#
-
# @param stream_options [OpenAI::Models::Chat::ChatCompletionStreamOptions, nil] Options for streaming response. Only set this when you set `stream: true`.
-
#
-
# @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
-
#
-
# @param tool_choice [Symbol, OpenAI::Models::Chat::ChatCompletionToolChoiceOption::Auto, OpenAI::Models::Chat::ChatCompletionNamedToolChoice] Controls which (if any) tool is called by the model.
-
#
-
# @param tools [Array<OpenAI::Models::Chat::ChatCompletionTool>] A list of tools the model may call. Currently, only functions are supported as a
-
#
-
# @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to
-
#
-
# @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling,
-
#
-
# @param user [String] This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
-
#
-
# @param web_search_options [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions] This tool searches the web for relevant results to use in a response.
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
-
#
-
# @return [OpenAI::Models::Chat::ChatCompletion]
-
#
-
# @see OpenAI::Models::Chat::CompletionCreateParams
-
1
        # Non-streaming chat completion call with structured-output support.
        #
        # Beyond dumping params and issuing the POST, this method:
        #   1. rejects `stream: true` (callers must use `#stream_raw`),
        #   2. rewrites any JsonSchemaConverter found in `response_format` or
        #      `tools` into the wire-format JSON-schema payload, and
        #   3. installs an `unwrap` hook that parses the model's JSON output
        #      back through the converter(s) into `:parsed` fields.
        #
        # @param params [Hash] request parameters; see
        #   {OpenAI::Models::Chat::CompletionCreateParams}.
        # @return [OpenAI::Models::Chat::ChatCompletion]
        # @raise [ArgumentError] when `params[:stream]` is truthy.
        def create(params)
          parsed, options = OpenAI::Chat::CompletionCreateParams.dump_request(params)
          if parsed[:stream]
            message = "Please use `#stream_raw` for the streaming use case."
            raise ArgumentError.new(message)
          end

          # `model` captures a response_format converter (if any); `tool_models`
          # maps tool name => converter for tools declared as converters.
          model = nil
          tool_models = {}
          case parsed
          in {response_format: OpenAI::StructuredOutput::JsonSchemaConverter => model}
            # Bare converter: expand into a full json_schema response_format.
            parsed.update(
              response_format: {
                type: :json_schema,
                json_schema: {
                  strict: true,
                  name: model.name.split("::").last,
                  schema: model.to_json_schema
                }
              }
            )
          in {response_format: {type: :json_schema, json_schema: OpenAI::StructuredOutput::JsonSchemaConverter => model}}
            # Converter supplied as the json_schema value: expand in place.
            parsed.fetch(:response_format).update(
              json_schema: {
                strict: true,
                name: model.name.split("::").last,
                schema: model.to_json_schema
              }
            )
          in {response_format: {type: :json_schema, json_schema: {schema: OpenAI::StructuredOutput::JsonSchemaConverter => model}}}
            # Converter nested under :schema: only the schema itself is dumped.
            parsed.dig(:response_format, :json_schema).store(:schema, model.to_json_schema)
          in {tools: Array => tools}
            mapped = tools.map do |tool|
              case tool
              in OpenAI::StructuredOutput::JsonSchemaConverter
                # Tool given as a bare converter: synthesize a function tool.
                name = tool.name.split("::").last
                tool_models.store(name, tool)
                {
                  type: :function,
                  function: {
                    strict: true,
                    name: name,
                    parameters: tool.to_json_schema
                  }
                }
              in {function: {parameters: OpenAI::StructuredOutput::JsonSchemaConverter => params}}
                # NOTE: the pattern-match capture rebinds `params` (the method
                # argument) within this branch to the converter object.
                func = tool.fetch(:function)
                name = func[:name] ||= params.name.split("::").last
                tool_models.store(name, params)
                func.update(parameters: params.to_json_schema)
                tool
              else
                tool
              end
            end

            tools.replace(mapped)
          else
          end

          # rubocop:disable Metrics/BlockLength
          unwrap = ->(raw) do
            if model.is_a?(OpenAI::StructuredOutput::JsonSchemaConverter)
              # Coerce each choice's JSON message content through the
              # response_format converter; JSON parse failures are passed
              # through to the coercer as the error object itself.
              raw[:choices]&.each do |choice|
                message = choice.fetch(:message)
                begin
                  parsed = JSON.parse(message.fetch(:content), symbolize_names: true)
                rescue JSON::ParserError => e
                  parsed = e
                end
                coerced = OpenAI::Internal::Type::Converter.coerce(model, parsed)
                message.store(:parsed, coerced)
              end
            end
            # Coerce tool-call arguments for tools that were given as converters.
            raw[:choices]&.each do |choice|
              choice.dig(:message, :tool_calls)&.each do |tool_call|
                func = tool_call.fetch(:function)
                next if (model = tool_models[func.fetch(:name)]).nil?

                begin
                  parsed = JSON.parse(func.fetch(:arguments), symbolize_names: true)
                rescue JSON::ParserError => e
                  parsed = e
                end
                coerced = OpenAI::Internal::Type::Converter.coerce(model, parsed)
                func.store(:parsed, coerced)
              end
            end

            raw
          end
          # rubocop:enable Metrics/BlockLength

          @client.request(
            method: :post,
            path: "chat/completions",
            body: parsed,
            unwrap: unwrap,
            model: OpenAI::Chat::ChatCompletion,
            options: options
          )
        end
-
-
1
def stream
-
raise NotImplementedError.new("higher level helpers are coming soon!")
-
end
-
-
# See {OpenAI::Resources::Chat::Completions#create} for non-streaming counterpart.
-
#
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Chat::CompletionCreateParams} for more details.
-
#
-
# **Starting a new project?** We recommend trying
-
# [Responses](https://platform.openai.com/docs/api-reference/responses) to take
-
# advantage of the latest OpenAI platform features. Compare
-
# [Chat Completions with Responses](https://platform.openai.com/docs/guides/responses-vs-chat-completions?api-mode=responses).
-
#
-
# ---
-
#
-
# Creates a model response for the given chat conversation. Learn more in the
-
# [text generation](https://platform.openai.com/docs/guides/text-generation),
-
# [vision](https://platform.openai.com/docs/guides/vision), and
-
# [audio](https://platform.openai.com/docs/guides/audio) guides.
-
#
-
# Parameter support can differ depending on the model used to generate the
-
# response, particularly for newer reasoning models. Parameters that are only
-
# supported for reasoning models are noted below. For the current state of
-
# unsupported parameters in reasoning models,
-
# [refer to the reasoning guide](https://platform.openai.com/docs/guides/reasoning).
-
#
-
# @overload stream_raw(messages:, model:, audio: nil, frequency_penalty: nil, function_call: nil, functions: nil, logit_bias: nil, logprobs: nil, max_completion_tokens: nil, max_tokens: nil, metadata: nil, modalities: nil, n: nil, parallel_tool_calls: nil, prediction: nil, presence_penalty: nil, prompt_cache_key: nil, reasoning_effort: nil, response_format: nil, safety_identifier: nil, seed: nil, service_tier: nil, stop: nil, store: nil, stream_options: nil, temperature: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, user: nil, web_search_options: nil, request_options: {})
-
#
-
# @param messages [Array<OpenAI::Models::Chat::ChatCompletionDeveloperMessageParam, OpenAI::Models::Chat::ChatCompletionSystemMessageParam, OpenAI::Models::Chat::ChatCompletionUserMessageParam, OpenAI::Models::Chat::ChatCompletionAssistantMessageParam, OpenAI::Models::Chat::ChatCompletionToolMessageParam, OpenAI::Models::Chat::ChatCompletionFunctionMessageParam>] A list of messages comprising the conversation so far. Depending on the
-
#
-
# @param model [String, Symbol, OpenAI::Models::ChatModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
-
#
-
# @param audio [OpenAI::Models::Chat::ChatCompletionAudioParam, nil] Parameters for audio output. Required when audio output is requested with
-
#
-
# @param frequency_penalty [Float, nil] Number between -2.0 and 2.0. Positive values penalize new tokens based on
-
#
-
# @param function_call [Symbol, OpenAI::Models::Chat::CompletionCreateParams::FunctionCall::FunctionCallMode, OpenAI::Models::Chat::ChatCompletionFunctionCallOption] Deprecated in favor of `tool_choice`.
-
#
-
# @param functions [Array<OpenAI::Models::Chat::CompletionCreateParams::Function>] Deprecated in favor of `tools`.
-
#
-
# @param logit_bias [Hash{Symbol=>Integer}, nil] Modify the likelihood of specified tokens appearing in the completion.
-
#
-
# @param logprobs [Boolean, nil] Whether to return log probabilities of the output tokens or not. If true,
-
#
-
# @param max_completion_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a completion,
-
#
-
# @param max_tokens [Integer, nil] The maximum number of [tokens](/tokenizer) that can be generated in the
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
#
-
# @param modalities [Array<Symbol, OpenAI::Models::Chat::CompletionCreateParams::Modality>, nil] Output types that you would like the model to generate.
-
#
-
# @param n [Integer, nil] How many chat completion choices to generate for each input message. Note that y
-
#
-
# @param parallel_tool_calls [Boolean] Whether to enable [parallel function calling](https://platform.openai.com/docs/g
-
#
-
# @param prediction [OpenAI::Models::Chat::ChatCompletionPredictionContent, nil] Static predicted output content, such as the content of a text file that is
-
#
-
# @param presence_penalty [Float, nil] Number between -2.0 and 2.0. Positive values penalize new tokens based on
-
#
-
# @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi
-
#
-
# @param reasoning_effort [Symbol, OpenAI::Models::ReasoningEffort, nil] **o-series models only**
-
#
-
# @param response_format [OpenAI::Models::ResponseFormatText, OpenAI::Models::ResponseFormatJSONSchema, OpenAI::Models::ResponseFormatJSONObject] An object specifying the format that the model must output.
-
#
-
# @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi
-
#
-
# @param seed [Integer, nil] This feature is in Beta.
-
#
-
# @param service_tier [Symbol, OpenAI::Models::Chat::CompletionCreateParams::ServiceTier, nil] Specifies the processing type used for serving the request.
-
#
-
# @param stop [String, Array<String>, nil] Not supported with latest reasoning models `o3` and `o4-mini`.
-
#
-
# @param store [Boolean, nil] Whether or not to store the output of this chat completion request for
-
#
-
# @param stream_options [OpenAI::Models::Chat::ChatCompletionStreamOptions, nil] Options for streaming response. Only set this when you set `stream: true`.
-
#
-
# @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
-
#
-
# @param tool_choice [Symbol, OpenAI::Models::Chat::ChatCompletionToolChoiceOption::Auto, OpenAI::Models::Chat::ChatCompletionNamedToolChoice] Controls which (if any) tool is called by the model.
-
#
-
# @param tools [Array<OpenAI::Models::Chat::ChatCompletionTool>] A list of tools the model may call. Currently, only functions are supported as a
-
#
-
# @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to
-
#
-
# @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling,
-
#
-
# @param user [String] This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
-
#
-
# @param web_search_options [OpenAI::Models::Chat::CompletionCreateParams::WebSearchOptions] This tool searches the web for relevant results to use in a response.
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
-
#
-
# @return [OpenAI::Internal::Stream<OpenAI::Models::Chat::ChatCompletionChunk>]
-
#
-
# @see OpenAI::Models::Chat::CompletionCreateParams
-
1
def stream_raw(params)
-
parsed, options = OpenAI::Chat::CompletionCreateParams.dump_request(params)
-
else: 0
then: 0
unless parsed.fetch(:stream, true)
-
message = "Please use `#create` for the non-streaming use case."
-
raise ArgumentError.new(message)
-
end
-
parsed.store(:stream, true)
-
@client.request(
-
method: :post,
-
path: "chat/completions",
-
headers: {"accept" => "text/event-stream"},
-
body: parsed,
-
stream: OpenAI::Internal::Stream,
-
model: OpenAI::Chat::ChatCompletionChunk,
-
options: options
-
)
-
end
-
-
# Get a stored chat completion. Only Chat Completions that have been created with
-
# the `store` parameter set to `true` will be returned.
-
#
-
# @overload retrieve(completion_id, request_options: {})
-
#
-
# @param completion_id [String] The ID of the chat completion to retrieve.
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
-
#
-
# @return [OpenAI::Models::Chat::ChatCompletion]
-
#
-
# @see OpenAI::Models::Chat::CompletionRetrieveParams
-
1
def retrieve(completion_id, params = {})
-
@client.request(
-
method: :get,
-
path: ["chat/completions/%1$s", completion_id],
-
model: OpenAI::Chat::ChatCompletion,
-
options: params[:request_options]
-
)
-
end
-
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Chat::CompletionUpdateParams} for more details.
-
#
-
# Modify a stored chat completion. Only Chat Completions that have been created
-
# with the `store` parameter set to `true` can be modified. Currently, the only
-
# supported modification is to update the `metadata` field.
-
#
-
# @overload update(completion_id, metadata:, request_options: {})
-
#
-
# @param completion_id [String] The ID of the chat completion to update.
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
-
#
-
# @return [OpenAI::Models::Chat::ChatCompletion]
-
#
-
# @see OpenAI::Models::Chat::CompletionUpdateParams
-
1
def update(completion_id, params)
-
parsed, options = OpenAI::Chat::CompletionUpdateParams.dump_request(params)
-
@client.request(
-
method: :post,
-
path: ["chat/completions/%1$s", completion_id],
-
body: parsed,
-
model: OpenAI::Chat::ChatCompletion,
-
options: options
-
)
-
end
-
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Chat::CompletionListParams} for more details.
-
#
-
# List stored Chat Completions. Only Chat Completions that have been stored with
-
# the `store` parameter set to `true` will be returned.
-
#
-
# @overload list(after: nil, limit: nil, metadata: nil, model: nil, order: nil, request_options: {})
-
#
-
# @param after [String] Identifier for the last chat completion from the previous pagination request.
-
#
-
# @param limit [Integer] Number of Chat Completions to retrieve.
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] A list of metadata keys to filter the Chat Completions by. Example:
-
#
-
# @param model [String] The model used to generate the Chat Completions.
-
#
-
# @param order [Symbol, OpenAI::Models::Chat::CompletionListParams::Order] Sort order for Chat Completions by timestamp. Use `asc` for ascending order or `
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
-
#
-
# @return [OpenAI::Internal::CursorPage<OpenAI::Models::Chat::ChatCompletion>]
-
#
-
# @see OpenAI::Models::Chat::CompletionListParams
-
1
def list(params = {})
-
parsed, options = OpenAI::Chat::CompletionListParams.dump_request(params)
-
@client.request(
-
method: :get,
-
path: "chat/completions",
-
query: parsed,
-
page: OpenAI::Internal::CursorPage,
-
model: OpenAI::Chat::ChatCompletion,
-
options: options
-
)
-
end
-
-
# Delete a stored chat completion. Only Chat Completions that have been created
-
# with the `store` parameter set to `true` can be deleted.
-
#
-
# @overload delete(completion_id, request_options: {})
-
#
-
# @param completion_id [String] The ID of the chat completion to delete.
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
-
#
-
# @return [OpenAI::Models::Chat::ChatCompletionDeleted]
-
#
-
# @see OpenAI::Models::Chat::CompletionDeleteParams
-
1
def delete(completion_id, params = {})
-
@client.request(
-
method: :delete,
-
path: ["chat/completions/%1$s", completion_id],
-
model: OpenAI::Chat::ChatCompletionDeleted,
-
options: params[:request_options]
-
)
-
end
-
-
# @api private
-
#
-
# @param client [OpenAI::Client]
-
1
        def initialize(client:)
          @client = client
          # Sub-resource for reading the messages of stored completions.
          @messages = OpenAI::Resources::Chat::Completions::Messages.new(client: client)
        end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Resources
-
1
class Chat
-
1
class Completions
-
1
class Messages
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Chat::Completions::MessageListParams} for more details.
-
#
-
# Get the messages in a stored chat completion. Only Chat Completions that have
-
# been created with the `store` parameter set to `true` will be returned.
-
#
-
# @overload list(completion_id, after: nil, limit: nil, order: nil, request_options: {})
-
#
-
# @param completion_id [String] The ID of the chat completion to retrieve messages from.
-
#
-
# @param after [String] Identifier for the last message from the previous pagination request.
-
#
-
# @param limit [Integer] Number of messages to retrieve.
-
#
-
# @param order [Symbol, OpenAI::Models::Chat::Completions::MessageListParams::Order] Sort order for messages by timestamp. Use `asc` for ascending order or `desc` fo
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
-
#
-
# @return [OpenAI::Internal::CursorPage<OpenAI::Models::Chat::ChatCompletionStoreMessage>]
-
#
-
# @see OpenAI::Models::Chat::Completions::MessageListParams
-
1
def list(completion_id, params = {})
-
parsed, options = OpenAI::Chat::Completions::MessageListParams.dump_request(params)
-
@client.request(
-
method: :get,
-
path: ["chat/completions/%1$s/messages", completion_id],
-
query: parsed,
-
page: OpenAI::Internal::CursorPage,
-
model: OpenAI::Chat::ChatCompletionStoreMessage,
-
options: options
-
)
-
end
-
-
# @api private
-
#
-
# @param client [OpenAI::Client]
-
1
          def initialize(client:)
            # Shared low-level HTTP client used to issue requests.
            @client = client
          end
-
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Resources
-
1
class Completions
-
# See {OpenAI::Resources::Completions#create_streaming} for streaming counterpart.
-
#
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::CompletionCreateParams} for more details.
-
#
-
# Creates a completion for the provided prompt and parameters.
-
#
-
# @overload create(model:, prompt:, best_of: nil, echo: nil, frequency_penalty: nil, logit_bias: nil, logprobs: nil, max_tokens: nil, n: nil, presence_penalty: nil, seed: nil, stop: nil, stream_options: nil, suffix: nil, temperature: nil, top_p: nil, user: nil, request_options: {})
-
#
-
# @param model [String, Symbol, OpenAI::Models::CompletionCreateParams::Model] ID of the model to use. You can use the [List models](https://platform.openai.co
-
#
-
# @param prompt [String, Array<String>, Array<Integer>, Array<Array<Integer>>, nil] The prompt(s) to generate completions for, encoded as a string, array of strings
-
#
-
# @param best_of [Integer, nil] Generates `best_of` completions server-side and returns the "best" (the one with
-
#
-
# @param echo [Boolean, nil] Echo back the prompt in addition to the completion
-
#
-
# @param frequency_penalty [Float, nil] Number between -2.0 and 2.0. Positive values penalize new tokens based on their
-
#
-
# @param logit_bias [Hash{Symbol=>Integer}, nil] Modify the likelihood of specified tokens appearing in the completion.
-
#
-
# @param logprobs [Integer, nil] Include the log probabilities on the `logprobs` most likely output tokens, as we
-
#
-
# @param max_tokens [Integer, nil] The maximum number of [tokens](/tokenizer) that can be generated in the completi
-
#
-
# @param n [Integer, nil] How many completions to generate for each prompt.
-
#
-
# @param presence_penalty [Float, nil] Number between -2.0 and 2.0. Positive values penalize new tokens based on whethe
-
#
-
# @param seed [Integer, nil] If specified, our system will make a best effort to sample deterministically, su
-
#
-
# @param stop [String, Array<String>, nil] Not supported with latest reasoning models `o3` and `o4-mini`.
-
#
-
# @param stream_options [OpenAI::Models::Chat::ChatCompletionStreamOptions, nil] Options for streaming response. Only set this when you set `stream: true`.
-
#
-
# @param suffix [String, nil] The suffix that comes after a completion of inserted text.
-
#
-
# @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
-
#
-
# @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling, where the
-
#
-
# @param user [String] A unique identifier representing your end-user, which can help OpenAI to monitor
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
-
#
-
# @return [OpenAI::Models::Completion]
-
#
-
# @see OpenAI::Models::CompletionCreateParams
-
1
def create(params)
-
parsed, options = OpenAI::CompletionCreateParams.dump_request(params)
-
then: 0
else: 0
if parsed[:stream]
-
message = "Please use `#create_streaming` for the streaming use case."
-
raise ArgumentError.new(message)
-
end
-
@client.request(
-
method: :post,
-
path: "completions",
-
body: parsed,
-
model: OpenAI::Completion,
-
options: options
-
)
-
end
-
-
# See {OpenAI::Resources::Completions#create} for non-streaming counterpart.
-
#
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::CompletionCreateParams} for more details.
-
#
-
# Creates a completion for the provided prompt and parameters.
-
#
-
# @overload create_streaming(model:, prompt:, best_of: nil, echo: nil, frequency_penalty: nil, logit_bias: nil, logprobs: nil, max_tokens: nil, n: nil, presence_penalty: nil, seed: nil, stop: nil, stream_options: nil, suffix: nil, temperature: nil, top_p: nil, user: nil, request_options: {})
-
#
-
# @param model [String, Symbol, OpenAI::Models::CompletionCreateParams::Model] ID of the model to use. You can use the [List models](https://platform.openai.co
-
#
-
# @param prompt [String, Array<String>, Array<Integer>, Array<Array<Integer>>, nil] The prompt(s) to generate completions for, encoded as a string, array of strings
-
#
-
# @param best_of [Integer, nil] Generates `best_of` completions server-side and returns the "best" (the one with
-
#
-
# @param echo [Boolean, nil] Echo back the prompt in addition to the completion
-
#
-
# @param frequency_penalty [Float, nil] Number between -2.0 and 2.0. Positive values penalize new tokens based on their
-
#
-
# @param logit_bias [Hash{Symbol=>Integer}, nil] Modify the likelihood of specified tokens appearing in the completion.
-
#
-
# @param logprobs [Integer, nil] Include the log probabilities on the `logprobs` most likely output tokens, as we
-
#
-
# @param max_tokens [Integer, nil] The maximum number of [tokens](/tokenizer) that can be generated in the completi
-
#
-
# @param n [Integer, nil] How many completions to generate for each prompt.
-
#
-
# @param presence_penalty [Float, nil] Number between -2.0 and 2.0. Positive values penalize new tokens based on whethe
-
#
-
# @param seed [Integer, nil] If specified, our system will make a best effort to sample deterministically, su
-
#
-
# @param stop [String, Array<String>, nil] Not supported with latest reasoning models `o3` and `o4-mini`.
-
#
-
# @param stream_options [OpenAI::Models::Chat::ChatCompletionStreamOptions, nil] Options for streaming response. Only set this when you set `stream: true`.
-
#
-
# @param suffix [String, nil] The suffix that comes after a completion of inserted text.
-
#
-
# @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
-
#
-
# @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling, where the
-
#
-
# @param user [String] A unique identifier representing your end-user, which can help OpenAI to monitor
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
-
#
-
# @return [OpenAI::Internal::Stream<OpenAI::Models::Completion>]
-
#
-
# @see OpenAI::Models::CompletionCreateParams
-
1
def create_streaming(params)
-
parsed, options = OpenAI::CompletionCreateParams.dump_request(params)
-
else: 0
then: 0
unless parsed.fetch(:stream, true)
-
message = "Please use `#create` for the non-streaming use case."
-
raise ArgumentError.new(message)
-
end
-
parsed.store(:stream, true)
-
@client.request(
-
method: :post,
-
path: "completions",
-
headers: {"accept" => "text/event-stream"},
-
body: parsed,
-
stream: OpenAI::Internal::Stream,
-
model: OpenAI::Completion,
-
options: options
-
)
-
end
-
-
# @api private
-
#
-
# @param client [OpenAI::Client]
-
1
      def initialize(client:)
        # Shared low-level HTTP client used to issue requests.
        @client = client
      end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Resources
-
1
class Containers
-
# @return [OpenAI::Resources::Containers::Files]
-
1
attr_reader :files
-
-
# Create Container
-
#
-
# @overload create(name:, expires_after: nil, file_ids: nil, request_options: {})
-
#
-
# @param name [String] Name of the container to create.
-
#
-
# @param expires_after [OpenAI::Models::ContainerCreateParams::ExpiresAfter] Container expiration time in seconds relative to the 'anchor' time.
-
#
-
# @param file_ids [Array<String>] IDs of files to copy to the container.
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
-
#
-
# @return [OpenAI::Models::ContainerCreateResponse]
-
#
-
# @see OpenAI::Models::ContainerCreateParams
-
1
def create(params)
-
parsed, options = OpenAI::ContainerCreateParams.dump_request(params)
-
@client.request(
-
method: :post,
-
path: "containers",
-
body: parsed,
-
model: OpenAI::Models::ContainerCreateResponse,
-
options: options
-
)
-
end
-
-
# Retrieve Container
-
#
-
# @overload retrieve(container_id, request_options: {})
-
#
-
# @param container_id [String]
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
-
#
-
# @return [OpenAI::Models::ContainerRetrieveResponse]
-
#
-
# @see OpenAI::Models::ContainerRetrieveParams
-
1
def retrieve(container_id, params = {})
-
@client.request(
-
method: :get,
-
path: ["containers/%1$s", container_id],
-
model: OpenAI::Models::ContainerRetrieveResponse,
-
options: params[:request_options]
-
)
-
end
-
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::ContainerListParams} for more details.
-
#
-
# List Containers
-
#
-
# @overload list(after: nil, limit: nil, order: nil, request_options: {})
-
#
-
# @param after [String] A cursor for use in pagination. `after` is an object ID that defines your place
-
#
-
# @param limit [Integer] A limit on the number of objects to be returned. Limit can range between 1 and 1
-
#
-
# @param order [Symbol, OpenAI::Models::ContainerListParams::Order] Sort order by the `created_at` timestamp of the objects. `asc` for ascending ord
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
-
#
-
# @return [OpenAI::Internal::CursorPage<OpenAI::Models::ContainerListResponse>]
-
#
-
# @see OpenAI::Models::ContainerListParams
-
1
def list(params = {})
  query, opts = OpenAI::ContainerListParams.dump_request(params)
  request_args = {
    method: :get,
    path: "containers",
    query: query,
    page: OpenAI::Internal::CursorPage,
    model: OpenAI::Models::ContainerListResponse,
    options: opts
  }
  @client.request(**request_args)
end
-
-
# Delete Container
-
#
-
# @overload delete(container_id, request_options: {})
-
#
-
# @param container_id [String] The ID of the container to delete.
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
-
#
-
# @return [nil]
-
#
-
# @see OpenAI::Models::ContainerDeleteParams
-
1
def delete(container_id, params = {})
  request_args = {
    method: :delete,
    path: ["containers/%1$s", container_id],
    model: NilClass,
    options: params[:request_options]
  }
  @client.request(**request_args)
end
-
-
# @api private
-
#
-
# @param client [OpenAI::Client]
-
1
def initialize(client:)
  @client = client
  # The nested files sub-resource shares the same low-level HTTP client.
  @files = OpenAI::Resources::Containers::Files.new(client: client)
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Resources
-
1
class Containers
-
1
class Files
  # @return [OpenAI::Resources::Containers::Files::Content]
  attr_reader :content

  # Create a Container File.
  #
  # You can send either a multipart/form-data request with the raw file
  # content, or a JSON request with a file ID.
  #
  # @param container_id [String]
  # @param params [Hash{Symbol=>Object}] `file:`, `file_id:`,
  #   `request_options:` — see {OpenAI::Models::Containers::FileCreateParams}.
  #
  # @return [OpenAI::Models::Containers::FileCreateResponse]
  def create(container_id, params = {})
    body, opts = OpenAI::Containers::FileCreateParams.dump_request(params)
    request_args = {
      method: :post,
      path: ["containers/%1$s/files", container_id],
      headers: {"content-type" => "multipart/form-data"},
      body: body,
      model: OpenAI::Models::Containers::FileCreateResponse,
      options: opts
    }
    @client.request(**request_args)
  end

  # Retrieve a Container File.
  #
  # @param file_id [String]
  # @param params [Hash{Symbol=>Object}] must include `container_id:`; may
  #   include `request_options:`.
  #
  # @return [OpenAI::Models::Containers::FileRetrieveResponse]
  def retrieve(file_id, params)
    attrs, opts = OpenAI::Containers::FileRetrieveParams.dump_request(params)
    # `container_id` is a path (not body/query) parameter, so pull it out.
    container_id = attrs.delete(:container_id) do |key|
      raise ArgumentError, "missing required path argument #{key}"
    end
    request_args = {
      method: :get,
      path: ["containers/%1$s/files/%2$s", container_id, file_id],
      model: OpenAI::Models::Containers::FileRetrieveResponse,
      options: opts
    }
    @client.request(**request_args)
  end

  # List Container files.
  #
  # @param container_id [String]
  # @param params [Hash{Symbol=>Object}] `after:`, `limit:`, `order:`,
  #   `request_options:` — see {OpenAI::Models::Containers::FileListParams}.
  #
  # @return [OpenAI::Internal::CursorPage<OpenAI::Models::Containers::FileListResponse>]
  def list(container_id, params = {})
    query, opts = OpenAI::Containers::FileListParams.dump_request(params)
    request_args = {
      method: :get,
      path: ["containers/%1$s/files", container_id],
      query: query,
      page: OpenAI::Internal::CursorPage,
      model: OpenAI::Models::Containers::FileListResponse,
      options: opts
    }
    @client.request(**request_args)
  end

  # Delete a Container File.
  #
  # @param file_id [String]
  # @param params [Hash{Symbol=>Object}] must include `container_id:`; may
  #   include `request_options:`.
  #
  # @return [nil]
  def delete(file_id, params)
    attrs, opts = OpenAI::Containers::FileDeleteParams.dump_request(params)
    container_id = attrs.delete(:container_id) do |key|
      raise ArgumentError, "missing required path argument #{key}"
    end
    request_args = {
      method: :delete,
      path: ["containers/%1$s/files/%2$s", container_id, file_id],
      model: NilClass,
      options: opts
    }
    @client.request(**request_args)
  end

  # @api private
  #
  # @param client [OpenAI::Client]
  def initialize(client:)
    @client = client
    @content = OpenAI::Resources::Containers::Files::Content.new(client: client)
  end
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Resources
-
1
class Containers
-
1
class Files
-
1
class Content
  # Retrieve Container File Content.
  #
  # @param file_id [String]
  # @param params [Hash{Symbol=>Object}] must include `container_id:`; may
  #   include `request_options:`.
  #
  # @return [StringIO] the raw bytes of the file
  #
  # @see OpenAI::Models::Containers::Files::ContentRetrieveParams
  def retrieve(file_id, params)
    attrs, opts = OpenAI::Containers::Files::ContentRetrieveParams.dump_request(params)
    # `container_id` is a path parameter, so pull it out of the parsed attrs.
    container_id = attrs.delete(:container_id) do |key|
      raise ArgumentError, "missing required path argument #{key}"
    end
    request_args = {
      method: :get,
      path: ["containers/%1$s/files/%2$s/content", container_id, file_id],
      headers: {"accept" => "application/binary"},
      model: StringIO,
      options: opts
    }
    @client.request(**request_args)
  end

  # @api private
  #
  # @param client [OpenAI::Client]
  def initialize(client:)
    @client = client
  end
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Resources
-
1
class Embeddings
  # Creates an embedding vector representing the input text.
  #
  # @param params [Hash{Symbol=>Object}] `input:`, `model:`, `dimensions:`,
  #   `encoding_format:`, `user:`, `request_options:` — see
  #   {OpenAI::Models::EmbeddingCreateParams}.
  #
  # @return [OpenAI::Models::CreateEmbeddingResponse]
  def create(params)
    body, opts = OpenAI::EmbeddingCreateParams.dump_request(params)
    request_args = {
      method: :post,
      path: "embeddings",
      body: body,
      model: OpenAI::CreateEmbeddingResponse,
      options: opts
    }
    @client.request(**request_args)
  end

  # @api private
  #
  # @param client [OpenAI::Client]
  def initialize(client:)
    @client = client
  end
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Resources
-
1
class Evals
  # @return [OpenAI::Resources::Evals::Runs]
  attr_reader :runs

  # Create the structure of an evaluation: a set of testing criteria plus the
  # config for a data source, which dictates the schema of the data used in
  # the evaluation. See the
  # [Evals guide](https://platform.openai.com/docs/guides/evals).
  #
  # @param params [Hash{Symbol=>Object}] `data_source_config:`,
  #   `testing_criteria:`, `metadata:`, `name:`, `request_options:` — see
  #   {OpenAI::Models::EvalCreateParams}.
  #
  # @return [OpenAI::Models::EvalCreateResponse]
  def create(params)
    body, opts = OpenAI::EvalCreateParams.dump_request(params)
    request_args = {
      method: :post,
      path: "evals",
      body: body,
      model: OpenAI::Models::EvalCreateResponse,
      options: opts
    }
    @client.request(**request_args)
  end

  # Get an evaluation by ID.
  #
  # @param eval_id [String] the ID of the evaluation to retrieve
  # @param params [Hash{Symbol=>Object}] optional `request_options:`.
  #
  # @return [OpenAI::Models::EvalRetrieveResponse]
  def retrieve(eval_id, params = {})
    request_args = {
      method: :get,
      path: ["evals/%1$s", eval_id],
      model: OpenAI::Models::EvalRetrieveResponse,
      options: params[:request_options]
    }
    @client.request(**request_args)
  end

  # Update certain properties of an evaluation.
  #
  # @param eval_id [String] the ID of the evaluation to update
  # @param params [Hash{Symbol=>Object}] `metadata:`, `name:`,
  #   `request_options:` — see {OpenAI::Models::EvalUpdateParams}.
  #
  # @return [OpenAI::Models::EvalUpdateResponse]
  def update(eval_id, params = {})
    body, opts = OpenAI::EvalUpdateParams.dump_request(params)
    request_args = {
      method: :post,
      path: ["evals/%1$s", eval_id],
      body: body,
      model: OpenAI::Models::EvalUpdateResponse,
      options: opts
    }
    @client.request(**request_args)
  end

  # List evaluations for a project.
  #
  # @param params [Hash{Symbol=>Object}] `after:`, `limit:`, `order:`,
  #   `order_by:`, `request_options:` — see {OpenAI::Models::EvalListParams}.
  #
  # @return [OpenAI::Internal::CursorPage<OpenAI::Models::EvalListResponse>]
  def list(params = {})
    query, opts = OpenAI::EvalListParams.dump_request(params)
    request_args = {
      method: :get,
      path: "evals",
      query: query,
      page: OpenAI::Internal::CursorPage,
      model: OpenAI::Models::EvalListResponse,
      options: opts
    }
    @client.request(**request_args)
  end

  # Delete an evaluation.
  #
  # @param eval_id [String] the ID of the evaluation to delete
  # @param params [Hash{Symbol=>Object}] optional `request_options:`.
  #
  # @return [OpenAI::Models::EvalDeleteResponse]
  def delete(eval_id, params = {})
    request_args = {
      method: :delete,
      path: ["evals/%1$s", eval_id],
      model: OpenAI::Models::EvalDeleteResponse,
      options: params[:request_options]
    }
    @client.request(**request_args)
  end

  # @api private
  #
  # @param client [OpenAI::Client]
  def initialize(client:)
    @client = client
    @runs = OpenAI::Resources::Evals::Runs.new(client: client)
  end
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Resources
-
1
class Evals
-
1
class Runs
  # @return [OpenAI::Resources::Evals::Runs::OutputItems]
  attr_reader :output_items

  # Kicks off a new run for a given evaluation, specifying the data source and
  # what model configuration to use to test. The data source will be validated
  # against the schema specified in the config of the evaluation.
  #
  # @param eval_id [String] the ID of the evaluation to create a run for
  # @param params [Hash{Symbol=>Object}] `data_source:`, `metadata:`, `name:`,
  #   `request_options:` — see {OpenAI::Models::Evals::RunCreateParams}.
  #
  # @return [OpenAI::Models::Evals::RunCreateResponse]
  def create(eval_id, params)
    body, opts = OpenAI::Evals::RunCreateParams.dump_request(params)
    request_args = {
      method: :post,
      path: ["evals/%1$s/runs", eval_id],
      body: body,
      model: OpenAI::Models::Evals::RunCreateResponse,
      options: opts
    }
    @client.request(**request_args)
  end

  # Get an evaluation run by ID.
  #
  # @param run_id [String] the ID of the run to retrieve
  # @param params [Hash{Symbol=>Object}] must include `eval_id:`; may include
  #   `request_options:`.
  #
  # @return [OpenAI::Models::Evals::RunRetrieveResponse]
  def retrieve(run_id, params)
    attrs, opts = OpenAI::Evals::RunRetrieveParams.dump_request(params)
    # `eval_id` is a path parameter, so pull it out of the parsed attrs.
    eval_id = attrs.delete(:eval_id) do |key|
      raise ArgumentError, "missing required path argument #{key}"
    end
    request_args = {
      method: :get,
      path: ["evals/%1$s/runs/%2$s", eval_id, run_id],
      model: OpenAI::Models::Evals::RunRetrieveResponse,
      options: opts
    }
    @client.request(**request_args)
  end

  # Get a list of runs for an evaluation.
  #
  # @param eval_id [String] the ID of the evaluation to retrieve runs for
  # @param params [Hash{Symbol=>Object}] `after:`, `limit:`, `order:`,
  #   `status:`, `request_options:` — see {OpenAI::Models::Evals::RunListParams}.
  #
  # @return [OpenAI::Internal::CursorPage<OpenAI::Models::Evals::RunListResponse>]
  def list(eval_id, params = {})
    query, opts = OpenAI::Evals::RunListParams.dump_request(params)
    request_args = {
      method: :get,
      path: ["evals/%1$s/runs", eval_id],
      query: query,
      page: OpenAI::Internal::CursorPage,
      model: OpenAI::Models::Evals::RunListResponse,
      options: opts
    }
    @client.request(**request_args)
  end

  # Delete an eval run.
  #
  # @param run_id [String] the ID of the run to delete
  # @param params [Hash{Symbol=>Object}] must include `eval_id:`; may include
  #   `request_options:`.
  #
  # @return [OpenAI::Models::Evals::RunDeleteResponse]
  def delete(run_id, params)
    attrs, opts = OpenAI::Evals::RunDeleteParams.dump_request(params)
    eval_id = attrs.delete(:eval_id) do |key|
      raise ArgumentError, "missing required path argument #{key}"
    end
    request_args = {
      method: :delete,
      path: ["evals/%1$s/runs/%2$s", eval_id, run_id],
      model: OpenAI::Models::Evals::RunDeleteResponse,
      options: opts
    }
    @client.request(**request_args)
  end

  # Cancel an ongoing evaluation run.
  #
  # @param run_id [String] the ID of the run to cancel
  # @param params [Hash{Symbol=>Object}] must include `eval_id:`; may include
  #   `request_options:`.
  #
  # @return [OpenAI::Models::Evals::RunCancelResponse]
  def cancel(run_id, params)
    attrs, opts = OpenAI::Evals::RunCancelParams.dump_request(params)
    eval_id = attrs.delete(:eval_id) do |key|
      raise ArgumentError, "missing required path argument #{key}"
    end
    request_args = {
      method: :post,
      path: ["evals/%1$s/runs/%2$s", eval_id, run_id],
      model: OpenAI::Models::Evals::RunCancelResponse,
      options: opts
    }
    @client.request(**request_args)
  end

  # @api private
  #
  # @param client [OpenAI::Client]
  def initialize(client:)
    @client = client
    @output_items = OpenAI::Resources::Evals::Runs::OutputItems.new(client: client)
  end
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Resources
-
1
class Evals
-
1
class Runs
-
1
class OutputItems
  # Get an evaluation run output item by ID.
  #
  # @param output_item_id [String] the ID of the output item to retrieve
  # @param params [Hash{Symbol=>Object}] must include `eval_id:` and `run_id:`;
  #   may include `request_options:`.
  #
  # @return [OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse]
  def retrieve(output_item_id, params)
    attrs, opts = OpenAI::Evals::Runs::OutputItemRetrieveParams.dump_request(params)
    # Both `eval_id` and `run_id` are path parameters — extract them in order.
    eval_id = attrs.delete(:eval_id) do |key|
      raise ArgumentError, "missing required path argument #{key}"
    end
    run_id = attrs.delete(:run_id) do |key|
      raise ArgumentError, "missing required path argument #{key}"
    end
    request_args = {
      method: :get,
      path: ["evals/%1$s/runs/%2$s/output_items/%3$s", eval_id, run_id, output_item_id],
      model: OpenAI::Models::Evals::Runs::OutputItemRetrieveResponse,
      options: opts
    }
    @client.request(**request_args)
  end

  # Get a list of output items for an evaluation run.
  #
  # @param run_id [String] the ID of the run to retrieve output items for
  # @param params [Hash{Symbol=>Object}] must include `eval_id:`; may include
  #   `after:`, `limit:`, `order:`, `status:`, `request_options:` — see
  #   {OpenAI::Models::Evals::Runs::OutputItemListParams}.
  #
  # @return [OpenAI::Internal::CursorPage<OpenAI::Models::Evals::Runs::OutputItemListResponse>]
  def list(run_id, params)
    query, opts = OpenAI::Evals::Runs::OutputItemListParams.dump_request(params)
    eval_id = query.delete(:eval_id) do |key|
      raise ArgumentError, "missing required path argument #{key}"
    end
    request_args = {
      method: :get,
      path: ["evals/%1$s/runs/%2$s/output_items", eval_id, run_id],
      query: query,
      page: OpenAI::Internal::CursorPage,
      model: OpenAI::Models::Evals::Runs::OutputItemListResponse,
      options: opts
    }
    @client.request(**request_args)
  end

  # @api private
  #
  # @param client [OpenAI::Client]
  def initialize(client:)
    @client = client
  end
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Resources
-
1
class Files
  # Upload a file that can be used across various endpoints. Individual files
  # can be up to 512 MB, and the size of all files uploaded by one
  # organization can be up to 100 GB.
  #
  # The Assistants API supports files up to 2 million tokens; the Fine-tuning
  # and Batch APIs only support `.jsonl` files. See the platform docs for the
  # required formats and
  # [contact support](https://help.openai.com/) to raise storage limits.
  #
  # @param params [Hash{Symbol=>Object}] `file:`, `purpose:`,
  #   `request_options:` — see {OpenAI::Models::FileCreateParams}.
  #
  # @return [OpenAI::Models::FileObject]
  def create(params)
    body, opts = OpenAI::FileCreateParams.dump_request(params)
    request_args = {
      method: :post,
      path: "files",
      headers: {"content-type" => "multipart/form-data"},
      body: body,
      model: OpenAI::FileObject,
      options: opts
    }
    @client.request(**request_args)
  end

  # Returns information about a specific file.
  #
  # @param file_id [String] the ID of the file to use for this request
  # @param params [Hash{Symbol=>Object}] optional `request_options:`.
  #
  # @return [OpenAI::Models::FileObject]
  def retrieve(file_id, params = {})
    request_args = {
      method: :get,
      path: ["files/%1$s", file_id],
      model: OpenAI::FileObject,
      options: params[:request_options]
    }
    @client.request(**request_args)
  end

  # Returns a list of files.
  #
  # @param params [Hash{Symbol=>Object}] `after:`, `limit:`, `order:`,
  #   `purpose:`, `request_options:` — see {OpenAI::Models::FileListParams}.
  #
  # @return [OpenAI::Internal::CursorPage<OpenAI::Models::FileObject>]
  def list(params = {})
    query, opts = OpenAI::FileListParams.dump_request(params)
    request_args = {
      method: :get,
      path: "files",
      query: query,
      page: OpenAI::Internal::CursorPage,
      model: OpenAI::FileObject,
      options: opts
    }
    @client.request(**request_args)
  end

  # Delete a file.
  #
  # @param file_id [String] the ID of the file to use for this request
  # @param params [Hash{Symbol=>Object}] optional `request_options:`.
  #
  # @return [OpenAI::Models::FileDeleted]
  def delete(file_id, params = {})
    request_args = {
      method: :delete,
      path: ["files/%1$s", file_id],
      model: OpenAI::FileDeleted,
      options: params[:request_options]
    }
    @client.request(**request_args)
  end

  # Returns the contents of the specified file.
  #
  # @param file_id [String] the ID of the file to use for this request
  # @param params [Hash{Symbol=>Object}] optional `request_options:`.
  #
  # @return [StringIO] the raw bytes of the file
  def content(file_id, params = {})
    request_args = {
      method: :get,
      path: ["files/%1$s/content", file_id],
      headers: {"accept" => "application/binary"},
      model: StringIO,
      options: params[:request_options]
    }
    @client.request(**request_args)
  end

  # @api private
  #
  # @param client [OpenAI::Client]
  def initialize(client:)
    @client = client
  end
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Resources
-
1
# Aggregate resource: exposes the fine-tuning sub-resources; holds no
# request logic of its own.
class FineTuning
  # @return [OpenAI::Resources::FineTuning::Methods]
  attr_reader :methods_

  # @return [OpenAI::Resources::FineTuning::Jobs]
  attr_reader :jobs

  # @return [OpenAI::Resources::FineTuning::Checkpoints]
  attr_reader :checkpoints

  # @return [OpenAI::Resources::FineTuning::Alpha]
  attr_reader :alpha

  # @api private
  #
  # @param client [OpenAI::Client]
  def initialize(client:)
    @client = client
    # Trailing underscore avoids clashing with Object#methods.
    @methods_ = OpenAI::Resources::FineTuning::Methods.new(client: client)
    @jobs = OpenAI::Resources::FineTuning::Jobs.new(client: client)
    @checkpoints = OpenAI::Resources::FineTuning::Checkpoints.new(client: client)
    @alpha = OpenAI::Resources::FineTuning::Alpha.new(client: client)
  end
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Resources
-
1
class FineTuning
-
1
# Aggregate resource for experimental (alpha) fine-tuning endpoints.
class Alpha
  # @return [OpenAI::Resources::FineTuning::Alpha::Graders]
  attr_reader :graders

  # @api private
  #
  # @param client [OpenAI::Client]
  def initialize(client:)
    @client = client
    @graders = OpenAI::Resources::FineTuning::Alpha::Graders.new(client: client)
  end
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Resources
-
1
class FineTuning
-
1
class Alpha
-
1
class Graders
  # Run a grader.
  #
  # @param params [Hash{Symbol=>Object}] `grader:`, `model_sample:`, `item:`,
  #   `request_options:` — see {OpenAI::Models::FineTuning::Alpha::GraderRunParams}.
  #
  # @return [OpenAI::Models::FineTuning::Alpha::GraderRunResponse]
  def run(params)
    body, opts = OpenAI::FineTuning::Alpha::GraderRunParams.dump_request(params)
    request_args = {
      method: :post,
      path: "fine_tuning/alpha/graders/run",
      body: body,
      model: OpenAI::Models::FineTuning::Alpha::GraderRunResponse,
      options: opts
    }
    @client.request(**request_args)
  end

  # Validate a grader.
  #
  # @param params [Hash{Symbol=>Object}] `grader:`, `request_options:` — see
  #   {OpenAI::Models::FineTuning::Alpha::GraderValidateParams}.
  #
  # @return [OpenAI::Models::FineTuning::Alpha::GraderValidateResponse]
  def validate(params)
    body, opts = OpenAI::FineTuning::Alpha::GraderValidateParams.dump_request(params)
    request_args = {
      method: :post,
      path: "fine_tuning/alpha/graders/validate",
      body: body,
      model: OpenAI::Models::FineTuning::Alpha::GraderValidateResponse,
      options: opts
    }
    @client.request(**request_args)
  end

  # @api private
  #
  # @param client [OpenAI::Client]
  def initialize(client:)
    @client = client
  end
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Resources
-
1
class FineTuning
-
1
# Aggregate resource for fine-tuned model checkpoint endpoints.
class Checkpoints
  # @return [OpenAI::Resources::FineTuning::Checkpoints::Permissions]
  attr_reader :permissions

  # @api private
  #
  # @param client [OpenAI::Client]
  def initialize(client:)
    @client = client
    @permissions = OpenAI::Resources::FineTuning::Checkpoints::Permissions.new(client: client)
  end
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Resources
    class FineTuning
      class Checkpoints
        # Manage project-level access permissions for fine-tuned model checkpoints.
        class Permissions
          # **NOTE:** Calling this endpoint requires an [admin API key](../admin-api-keys).
          #
          # Lets organization owners share fine-tuned models with other projects in
          # their organization.
          #
          # @param fine_tuned_model_checkpoint [String] ID of the checkpoint to create a permission for.
          # @param params [Hash] see {OpenAI::Models::FineTuning::Checkpoints::PermissionCreateParams};
          #   `project_ids:` is required.
          #
          # @return [OpenAI::Internal::Page<OpenAI::Models::FineTuning::Checkpoints::PermissionCreateResponse>]
          #
          # @see OpenAI::Models::FineTuning::Checkpoints::PermissionCreateParams
          def create(fine_tuned_model_checkpoint, params)
            body, request_opts = OpenAI::FineTuning::Checkpoints::PermissionCreateParams.dump_request(params)
            @client.request(
              method: :post,
              path: ["fine_tuning/checkpoints/%1$s/permissions", fine_tuned_model_checkpoint],
              body: body,
              page: OpenAI::Internal::Page,
              model: OpenAI::Models::FineTuning::Checkpoints::PermissionCreateResponse,
              options: request_opts
            )
          end

          # **NOTE:** This endpoint requires an [admin API key](../admin-api-keys).
          #
          # Lets organization owners view all permissions for a fine-tuned model
          # checkpoint.
          #
          # @param fine_tuned_model_checkpoint [String] ID of the checkpoint to get permissions for.
          # @param params [Hash] optional filters (`after:`, `limit:`, `order:`, `project_id:`);
          #   see {OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveParams}.
          #
          # @return [OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse]
          #
          # @see OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveParams
          def retrieve(fine_tuned_model_checkpoint, params = {})
            query, request_opts = OpenAI::FineTuning::Checkpoints::PermissionRetrieveParams.dump_request(params)
            @client.request(
              method: :get,
              path: ["fine_tuning/checkpoints/%1$s/permissions", fine_tuned_model_checkpoint],
              query: query,
              model: OpenAI::Models::FineTuning::Checkpoints::PermissionRetrieveResponse,
              options: request_opts
            )
          end

          # **NOTE:** This endpoint requires an [admin API key](../admin-api-keys).
          #
          # Lets organization owners delete a permission for a fine-tuned model
          # checkpoint.
          #
          # @param permission_id [String] ID of the permission to delete.
          # @param params [Hash] must include `fine_tuned_model_checkpoint:` (used as a path
          #   argument); see {OpenAI::Models::FineTuning::Checkpoints::PermissionDeleteParams}.
          #
          # @raise [ArgumentError] when `fine_tuned_model_checkpoint:` is missing.
          # @return [OpenAI::Models::FineTuning::Checkpoints::PermissionDeleteResponse]
          #
          # @see OpenAI::Models::FineTuning::Checkpoints::PermissionDeleteParams
          def delete(permission_id, params)
            parsed, request_opts = OpenAI::FineTuning::Checkpoints::PermissionDeleteParams.dump_request(params)
            # The checkpoint ID travels in the path, not the body, so pull it out of
            # the parsed params; a missing key is a caller error.
            checkpoint_id =
              parsed.delete(:fine_tuned_model_checkpoint) do |key|
                raise ArgumentError, "missing required path argument #{key}"
              end
            @client.request(
              method: :delete,
              path: [
                "fine_tuning/checkpoints/%1$s/permissions/%2$s",
                checkpoint_id,
                permission_id
              ],
              model: OpenAI::Models::FineTuning::Checkpoints::PermissionDeleteResponse,
              options: request_opts
            )
          end

          # @api private
          #
          # @param client [OpenAI::Client] the configured API client used for all requests
          def initialize(client:)
            @client = client
          end
        end
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Resources
    class FineTuning
      # Resource for creating and managing fine-tuning jobs.
      class Jobs
        # @return [OpenAI::Resources::FineTuning::Jobs::Checkpoints]
        attr_reader :checkpoints

        # Creates a fine-tuning job which begins the process of creating a new
        # model from a given dataset. The response includes details of the
        # enqueued job, including its status and the name of the fine-tuned
        # model once training completes.
        #
        # [Learn more about fine-tuning](https://platform.openai.com/docs/guides/model-optimization)
        #
        # @param params [Hash] see {OpenAI::Models::FineTuning::JobCreateParams};
        #   `model:` and `training_file:` are required.
        #
        # @return [OpenAI::Models::FineTuning::FineTuningJob]
        #
        # @see OpenAI::Models::FineTuning::JobCreateParams
        def create(params)
          body, request_opts = OpenAI::FineTuning::JobCreateParams.dump_request(params)
          @client.request(
            method: :post,
            path: "fine_tuning/jobs",
            body: body,
            model: OpenAI::FineTuning::FineTuningJob,
            options: request_opts
          )
        end

        # Gets info about a fine-tuning job.
        #
        # @param fine_tuning_job_id [String] ID of the fine-tuning job.
        # @param params [Hash] optional; only `request_options:` is used.
        #
        # @return [OpenAI::Models::FineTuning::FineTuningJob]
        #
        # @see OpenAI::Models::FineTuning::JobRetrieveParams
        def retrieve(fine_tuning_job_id, params = {})
          request_opts = params[:request_options]
          @client.request(
            method: :get,
            path: ["fine_tuning/jobs/%1$s", fine_tuning_job_id],
            model: OpenAI::FineTuning::FineTuningJob,
            options: request_opts
          )
        end

        # Lists your organization's fine-tuning jobs.
        #
        # @param params [Hash] optional filters (`after:`, `limit:`, `metadata:`);
        #   see {OpenAI::Models::FineTuning::JobListParams}.
        #
        # @return [OpenAI::Internal::CursorPage<OpenAI::Models::FineTuning::FineTuningJob>]
        #
        # @see OpenAI::Models::FineTuning::JobListParams
        def list(params = {})
          query, request_opts = OpenAI::FineTuning::JobListParams.dump_request(params)
          @client.request(
            method: :get,
            path: "fine_tuning/jobs",
            query: query,
            page: OpenAI::Internal::CursorPage,
            model: OpenAI::FineTuning::FineTuningJob,
            options: request_opts
          )
        end

        # Immediately cancels a fine-tune job.
        #
        # @param fine_tuning_job_id [String] ID of the fine-tuning job to cancel.
        # @param params [Hash] optional; only `request_options:` is used.
        #
        # @return [OpenAI::Models::FineTuning::FineTuningJob]
        #
        # @see OpenAI::Models::FineTuning::JobCancelParams
        def cancel(fine_tuning_job_id, params = {})
          request_opts = params[:request_options]
          @client.request(
            method: :post,
            path: ["fine_tuning/jobs/%1$s/cancel", fine_tuning_job_id],
            model: OpenAI::FineTuning::FineTuningJob,
            options: request_opts
          )
        end

        # Gets status updates for a fine-tuning job.
        #
        # @param fine_tuning_job_id [String] ID of the fine-tuning job to get events for.
        # @param params [Hash] optional pagination (`after:`, `limit:`);
        #   see {OpenAI::Models::FineTuning::JobListEventsParams}.
        #
        # @return [OpenAI::Internal::CursorPage<OpenAI::Models::FineTuning::FineTuningJobEvent>]
        #
        # @see OpenAI::Models::FineTuning::JobListEventsParams
        def list_events(fine_tuning_job_id, params = {})
          query, request_opts = OpenAI::FineTuning::JobListEventsParams.dump_request(params)
          @client.request(
            method: :get,
            path: ["fine_tuning/jobs/%1$s/events", fine_tuning_job_id],
            query: query,
            page: OpenAI::Internal::CursorPage,
            model: OpenAI::FineTuning::FineTuningJobEvent,
            options: request_opts
          )
        end

        # Pauses a fine-tune job.
        #
        # @param fine_tuning_job_id [String] ID of the fine-tuning job to pause.
        # @param params [Hash] optional; only `request_options:` is used.
        #
        # @return [OpenAI::Models::FineTuning::FineTuningJob]
        #
        # @see OpenAI::Models::FineTuning::JobPauseParams
        def pause(fine_tuning_job_id, params = {})
          request_opts = params[:request_options]
          @client.request(
            method: :post,
            path: ["fine_tuning/jobs/%1$s/pause", fine_tuning_job_id],
            model: OpenAI::FineTuning::FineTuningJob,
            options: request_opts
          )
        end

        # Resumes a fine-tune job.
        #
        # @param fine_tuning_job_id [String] ID of the fine-tuning job to resume.
        # @param params [Hash] optional; only `request_options:` is used.
        #
        # @return [OpenAI::Models::FineTuning::FineTuningJob]
        #
        # @see OpenAI::Models::FineTuning::JobResumeParams
        def resume(fine_tuning_job_id, params = {})
          request_opts = params[:request_options]
          @client.request(
            method: :post,
            path: ["fine_tuning/jobs/%1$s/resume", fine_tuning_job_id],
            model: OpenAI::FineTuning::FineTuningJob,
            options: request_opts
          )
        end

        # @api private
        #
        # @param client [OpenAI::Client] the configured API client shared with sub-resources
        def initialize(client:)
          @client = client
          @checkpoints = OpenAI::Resources::FineTuning::Jobs::Checkpoints.new(client: client)
        end
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Resources
    class FineTuning
      class Jobs
        # Resource for listing the checkpoints produced by a fine-tuning job.
        class Checkpoints
          # Lists checkpoints for a fine-tuning job.
          #
          # @param fine_tuning_job_id [String] ID of the fine-tuning job to get checkpoints for.
          # @param params [Hash] optional pagination (`after:`, `limit:`);
          #   see {OpenAI::Models::FineTuning::Jobs::CheckpointListParams}.
          #
          # @return [OpenAI::Internal::CursorPage<OpenAI::Models::FineTuning::Jobs::FineTuningJobCheckpoint>]
          #
          # @see OpenAI::Models::FineTuning::Jobs::CheckpointListParams
          def list(fine_tuning_job_id, params = {})
            query, request_opts = OpenAI::FineTuning::Jobs::CheckpointListParams.dump_request(params)
            @client.request(
              method: :get,
              path: ["fine_tuning/jobs/%1$s/checkpoints", fine_tuning_job_id],
              query: query,
              page: OpenAI::Internal::CursorPage,
              model: OpenAI::FineTuning::Jobs::FineTuningJobCheckpoint,
              options: request_opts
            )
          end

          # @api private
          #
          # @param client [OpenAI::Client] the configured API client used for all requests
          def initialize(client:)
            @client = client
          end
        end
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Resources
    class FineTuning
      # Placeholder resource with no public endpoints of its own.
      class Methods
        # @api private
        #
        # @param client [OpenAI::Client] the configured API client used for all requests
        def initialize(client:)
          @client = client
        end
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Resources
    # Container resource grouping grader-related sub-resources.
    class Graders
      # @return [OpenAI::Resources::Graders::GraderModels]
      attr_reader :grader_models

      # @api private
      #
      # @param client [OpenAI::Client] the configured API client shared with sub-resources
      def initialize(client:)
        @client = client
        @grader_models = OpenAI::Resources::Graders::GraderModels.new(client: client)
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Resources
    class Graders
      # Placeholder resource with no public endpoints of its own.
      class GraderModels
        # @api private
        #
        # @param client [OpenAI::Client] the configured API client used for all requests
        def initialize(client:)
          @client = client
        end
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Resources
    # Resource for image generation, editing, and variation endpoints.
    class Images
      # Creates a variation of a given image. This endpoint only supports
      # `dall-e-2`.
      #
      # @param params [Hash] see {OpenAI::Models::ImageCreateVariationParams};
      #   `image:` is required (`model:`, `n:`, `response_format:`, `size:`,
      #   `user:` are optional).
      #
      # @return [OpenAI::Models::ImagesResponse]
      #
      # @see OpenAI::Models::ImageCreateVariationParams
      def create_variation(params)
        body, request_opts = OpenAI::ImageCreateVariationParams.dump_request(params)
        @client.request(
          method: :post,
          path: "images/variations",
          headers: {"content-type" => "multipart/form-data"},
          body: body,
          model: OpenAI::ImagesResponse,
          options: request_opts
        )
      end

      # See {OpenAI::Resources::Images#edit_stream_raw} for the streaming
      # counterpart.
      #
      # Creates an edited or extended image given one or more source images and
      # a prompt. This endpoint only supports `gpt-image-1` and `dall-e-2`.
      #
      # @param params [Hash] see {OpenAI::Models::ImageEditParams}; `image:` and
      #   `prompt:` are required.
      #
      # @raise [ArgumentError] when `stream: true` is passed; use
      #   {#edit_stream_raw} instead.
      # @return [OpenAI::Models::ImagesResponse]
      #
      # @see OpenAI::Models::ImageEditParams
      def edit(params)
        body, request_opts = OpenAI::ImageEditParams.dump_request(params)
        if body[:stream]
          raise ArgumentError, "Please use `#edit_stream_raw` for the streaming use case."
        end
        @client.request(
          method: :post,
          path: "images/edits",
          headers: {"content-type" => "multipart/form-data"},
          body: body,
          model: OpenAI::ImagesResponse,
          options: request_opts
        )
      end

      # See {OpenAI::Resources::Images#edit} for the non-streaming counterpart.
      #
      # Creates an edited or extended image given one or more source images and
      # a prompt, streaming events as they are produced. This endpoint only
      # supports `gpt-image-1` and `dall-e-2`.
      #
      # @param params [Hash] see {OpenAI::Models::ImageEditParams}; `image:` and
      #   `prompt:` are required.
      #
      # @raise [ArgumentError] when `stream: false` is passed; use {#edit} instead.
      # @return [OpenAI::Internal::Stream<OpenAI::Models::ImageEditPartialImageEvent, OpenAI::Models::ImageEditCompletedEvent>]
      #
      # @see OpenAI::Models::ImageEditParams
      def edit_stream_raw(params)
        body, request_opts = OpenAI::ImageEditParams.dump_request(params)
        unless body.fetch(:stream, true)
          raise ArgumentError, "Please use `#edit` for the non-streaming use case."
        end
        body[:stream] = true
        @client.request(
          method: :post,
          path: "images/edits",
          headers: {"content-type" => "multipart/form-data", "accept" => "text/event-stream"},
          body: body,
          stream: OpenAI::Internal::Stream,
          model: OpenAI::ImageEditStreamEvent,
          options: request_opts
        )
      end

      # See {OpenAI::Resources::Images#generate_stream_raw} for the streaming
      # counterpart.
      #
      # Creates an image given a prompt.
      # [Learn more](https://platform.openai.com/docs/guides/images).
      #
      # @param params [Hash] see {OpenAI::Models::ImageGenerateParams};
      #   `prompt:` is required.
      #
      # @raise [ArgumentError] when `stream: true` is passed; use
      #   {#generate_stream_raw} instead.
      # @return [OpenAI::Models::ImagesResponse]
      #
      # @see OpenAI::Models::ImageGenerateParams
      def generate(params)
        body, request_opts = OpenAI::ImageGenerateParams.dump_request(params)
        if body[:stream]
          raise ArgumentError, "Please use `#generate_stream_raw` for the streaming use case."
        end
        @client.request(
          method: :post,
          path: "images/generations",
          body: body,
          model: OpenAI::ImagesResponse,
          options: request_opts
        )
      end

      # See {OpenAI::Resources::Images#generate} for the non-streaming
      # counterpart.
      #
      # Creates an image given a prompt, streaming events as they are produced.
      # [Learn more](https://platform.openai.com/docs/guides/images).
      #
      # @param params [Hash] see {OpenAI::Models::ImageGenerateParams};
      #   `prompt:` is required.
      #
      # @raise [ArgumentError] when `stream: false` is passed; use {#generate} instead.
      # @return [OpenAI::Internal::Stream<OpenAI::Models::ImageGenPartialImageEvent, OpenAI::Models::ImageGenCompletedEvent>]
      #
      # @see OpenAI::Models::ImageGenerateParams
      def generate_stream_raw(params)
        body, request_opts = OpenAI::ImageGenerateParams.dump_request(params)
        unless body.fetch(:stream, true)
          raise ArgumentError, "Please use `#generate` for the non-streaming use case."
        end
        body[:stream] = true
        @client.request(
          method: :post,
          path: "images/generations",
          headers: {"accept" => "text/event-stream"},
          body: body,
          stream: OpenAI::Internal::Stream,
          model: OpenAI::ImageGenStreamEvent,
          options: request_opts
        )
      end

      # @api private
      #
      # @param client [OpenAI::Client] the configured API client used for all requests
      def initialize(client:)
        @client = client
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Resources
    # Resource for listing, retrieving, and deleting models.
    class Models
      # Retrieves a model instance, providing basic information about the model
      # such as the owner and permissioning.
      #
      # @param model [String] ID of the model to use for this request.
      # @param params [Hash] optional; only `request_options:` is used.
      #
      # @return [OpenAI::Models::Model]
      #
      # @see OpenAI::Models::ModelRetrieveParams
      def retrieve(model, params = {})
        request_opts = params[:request_options]
        @client.request(
          method: :get,
          path: ["models/%1$s", model],
          model: OpenAI::Model,
          options: request_opts
        )
      end

      # Lists the currently available models, and provides basic information
      # about each one such as the owner and availability.
      #
      # @param params [Hash] optional; only `request_options:` is used.
      #
      # @return [OpenAI::Internal::Page<OpenAI::Models::Model>]
      #
      # @see OpenAI::Models::ModelListParams
      def list(params = {})
        request_opts = params[:request_options]
        @client.request(
          method: :get,
          path: "models",
          page: OpenAI::Internal::Page,
          model: OpenAI::Model,
          options: request_opts
        )
      end

      # Deletes a fine-tuned model. You must have the Owner role in your
      # organization to delete a model.
      #
      # @param model [String] the model to delete.
      # @param params [Hash] optional; only `request_options:` is used.
      #
      # @return [OpenAI::Models::ModelDeleted]
      #
      # @see OpenAI::Models::ModelDeleteParams
      def delete(model, params = {})
        request_opts = params[:request_options]
        @client.request(
          method: :delete,
          path: ["models/%1$s", model],
          model: OpenAI::ModelDeleted,
          options: request_opts
        )
      end

      # @api private
      #
      # @param client [OpenAI::Client] the configured API client used for all requests
      def initialize(client:)
        @client = client
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Resources
    # Resource for the content-moderation endpoint.
    class Moderations
      # Classifies if text and/or image inputs are potentially harmful. Learn
      # more in the
      # [moderation guide](https://platform.openai.com/docs/guides/moderation).
      #
      # @param params [Hash] see {OpenAI::Models::ModerationCreateParams};
      #   `input:` is required (`model:` is optional).
      #
      # @return [OpenAI::Models::ModerationCreateResponse]
      #
      # @see OpenAI::Models::ModerationCreateParams
      def create(params)
        body, request_opts = OpenAI::ModerationCreateParams.dump_request(params)
        @client.request(
          method: :post,
          path: "moderations",
          body: body,
          model: OpenAI::Models::ModerationCreateResponse,
          options: request_opts
        )
      end

      # @api private
      #
      # @param client [OpenAI::Client] the configured API client used for all requests
      def initialize(client:)
        @client = client
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Resources
-
1
class Responses
-
# @return [OpenAI::Resources::Responses::InputItems]
-
1
attr_reader :input_items
-
-
# See {OpenAI::Resources::Responses#stream_raw} for streaming counterpart.
-
#
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseCreateParams} for more details.
-
#
-
# Creates a model response. Provide
-
# [text](https://platform.openai.com/docs/guides/text) or
-
# [image](https://platform.openai.com/docs/guides/images) inputs to generate
-
# [text](https://platform.openai.com/docs/guides/text) or
-
# [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have
-
# the model call your own
-
# [custom code](https://platform.openai.com/docs/guides/function-calling) or use
-
# built-in [tools](https://platform.openai.com/docs/guides/tools) like
-
# [web search](https://platform.openai.com/docs/guides/tools-web-search) or
-
# [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
-
# your own data as input for the model's response.
-
#
-
# @overload create(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
-
#
-
# @param background [Boolean, nil] Whether to run the model response in the background.
-
#
-
# @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>, nil] Specify additional output data to include in the model response. Currently
-
#
-
# @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
-
#
-
# @param instructions [String, nil] A system (or developer) message inserted into the model's context.
-
#
-
# @param max_output_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a response, in
-
#
-
# @param max_tool_calls [Integer, nil] The maximum number of total calls to built-in tools that can be processed in a r
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
#
-
# @param model [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel::ResponsesOnlyModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
-
#
-
# @param parallel_tool_calls [Boolean, nil] Whether to allow the model to run tool calls in parallel.
-
#
-
# @param previous_response_id [String, nil] The unique ID of the previous response to the model. Use this to
-
#
-
# @param prompt [OpenAI::Models::Responses::ResponsePrompt, nil] Reference to a prompt template and its variables.
-
#
-
# @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi
-
#
-
# @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
-
#
-
# @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi
-
#
-
# @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the processing type used for serving the request.
-
#
-
# @param store [Boolean, nil] Whether to store the generated model response for later retrieval via
-
#
-
# @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
-
#
-
# @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. Can be plain
-
#
-
# @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp] How the model should select which tool (or tools) to use when generating
-
#
-
# @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
-
#
-
# @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to
-
#
-
# @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling,
-
#
-
# @param truncation [Symbol, OpenAI::Models::Responses::ResponseCreateParams::Truncation, nil] The truncation strategy to use for the model response.
-
#
-
# @param user [String] This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
-
#
-
# @return [OpenAI::Models::Responses::Response]
-
#
-
# @see OpenAI::Models::Responses::ResponseCreateParams
-
1
def create(params = {})
  parsed, options = OpenAI::Responses::ResponseCreateParams.dump_request(params)
  # Streaming requests must use #stream or #stream_raw so callers receive an
  # event stream instead of a single Response object.
  if parsed[:stream]
    raise ArgumentError.new("Please use `#stream_raw` for the streaming use case.")
  end

  # Collect any JsonSchemaConverter models named in the request so structured
  # outputs can be coerced into typed Ruby objects once the response arrives.
  model, tool_models = get_structured_output_models(parsed)
  unwrap = ->(raw) { parse_structured_outputs!(raw, model, tool_models) }

  request_arguments = {
    method: :post,
    path: "responses",
    body: parsed,
    unwrap: unwrap,
    model: OpenAI::Responses::Response,
    options: options
  }
  @client.request(**request_arguments)
end
-
-
# See {OpenAI::Resources::Responses#create} for non-streaming counterpart.
-
#
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseCreateParams} for more details.
-
#
-
# Creates a model response. Provide
-
# [text](https://platform.openai.com/docs/guides/text) or
-
# [image](https://platform.openai.com/docs/guides/images) inputs to generate
-
# [text](https://platform.openai.com/docs/guides/text) or
-
# [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have
-
# the model call your own
-
# [custom code](https://platform.openai.com/docs/guides/function-calling) or use
-
# built-in [tools](https://platform.openai.com/docs/guides/tools) like
-
# [web search](https://platform.openai.com/docs/guides/tools-web-search) or
-
# [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
-
# your own data as input for the model's response.
-
#
-
# @overload stream(input:, model:, background: nil, include: nil, instructions: nil, max_output_tokens: nil, metadata: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, reasoning: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
-
#
-
# @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
-
#
-
# @param model [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel::ResponsesOnlyModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
-
#
-
# @param background [Boolean, nil] Whether to run the model response in the background.
-
#
-
# @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>, nil] Specify additional output data to include in the model response. Currently
-
#
-
# @param instructions [String, nil] A system (or developer) message inserted into the model's context.
-
#
-
# @param max_output_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a response, in
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
#
-
# @param parallel_tool_calls [Boolean, nil] Whether to allow the model to run tool calls in parallel.
-
#
-
# @param previous_response_id [String, nil] The unique ID of the previous response to the model. Use this to resume streams from a given response.
-
#
-
# @param prompt [OpenAI::Models::Responses::ResponsePrompt, nil] Reference to a prompt template and its variables.
-
#
-
# @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
-
#
-
# @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the latency tier to use for processing the request. This parameter is
-
#
-
# @param store [Boolean, nil] Whether to store the generated model response for later retrieval via
-
#
-
# @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
-
#
-
# @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. Can be plain
-
#
-
# @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction] How the model should select which tool (or tools) to use when generating
-
#
-
# @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
-
#
-
# @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling,
-
#
-
# @param truncation [Symbol, OpenAI::Models::Responses::ResponseCreateParams::Truncation, nil] The truncation strategy to use for the model response.
-
#
-
# @param user [String] A stable identifier for your end-users.
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
-
#
-
# @return [OpenAI::Helpers::Streaming::ResponseStream]
-
#
-
# @see OpenAI::Models::Responses::ResponseCreateParams
-
1
# High-level streaming entry point: wraps the raw SSE event stream in a
# ResponseStream helper. Use #stream_raw for the unwrapped event stream.
def stream(params)
  parsed, options = OpenAI::Responses::ResponseCreateParams.dump_request(params)
  # :previous_response_id selects "resume an existing response" mode;
  # :starting_after picks the event sequence number to resume from.
  starting_after, previous_response_id = parsed.values_at(:starting_after, :previous_response_id)

  if starting_after && !previous_response_id
    raise ArgumentError, "starting_after can only be used with previous_response_id"
  end
  # Converters for structured text output and per-tool structured arguments.
  model, tool_models = get_structured_output_models(parsed)

  # Attach parsed structured outputs when the terminal "response.completed"
  # event arrives; every event is returned unchanged otherwise.
  unwrap = ->(raw) do
    if raw[:type] == "response.completed" && raw[:response]
      parse_structured_outputs!(raw[:response], model, tool_models)
    end
    raw
  end

  if previous_response_id
    # Resume: only the retrieve-side params are forwarded.
    # NOTE(review): assumes `params` responds to #slice (i.e. is a Hash) — confirm callers.
    retrieve_params = params.slice(:include, :request_options)

    raw_stream = retrieve_streaming_internal(
      previous_response_id,
      params: retrieve_params,
      unwrap: unwrap
    )
  else
    # Create a fresh response with SSE forced on.
    parsed[:stream] = true

    raw_stream = @client.request(
      method: :post,
      path: "responses",
      headers: {"accept" => "text/event-stream"},
      body: parsed,
      stream: OpenAI::Internal::Stream,
      model: OpenAI::Models::Responses::ResponseStreamEvent,
      unwrap: unwrap,
      options: options
    )
  end

  # text_format lets the wrapper expose typed parsed output; starting_after
  # makes it skip events at or before that sequence number.
  OpenAI::Streaming::ResponseStream.new(
    raw_stream: raw_stream,
    text_format: model,
    starting_after: starting_after
  )
end
-
-
# See {OpenAI::Resources::Responses#create} for non-streaming counterpart.
-
#
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseCreateParams} for more details.
-
#
-
# Creates a model response. Provide
-
# [text](https://platform.openai.com/docs/guides/text) or
-
# [image](https://platform.openai.com/docs/guides/images) inputs to generate
-
# [text](https://platform.openai.com/docs/guides/text) or
-
# [JSON](https://platform.openai.com/docs/guides/structured-outputs) outputs. Have
-
# the model call your own
-
# [custom code](https://platform.openai.com/docs/guides/function-calling) or use
-
# built-in [tools](https://platform.openai.com/docs/guides/tools) like
-
# [web search](https://platform.openai.com/docs/guides/tools-web-search) or
-
# [file search](https://platform.openai.com/docs/guides/tools-file-search) to use
-
# your own data as input for the model's response.
-
#
-
# @overload stream_raw(background: nil, include: nil, input: nil, instructions: nil, max_output_tokens: nil, max_tool_calls: nil, metadata: nil, model: nil, parallel_tool_calls: nil, previous_response_id: nil, prompt: nil, prompt_cache_key: nil, reasoning: nil, safety_identifier: nil, service_tier: nil, store: nil, temperature: nil, text: nil, tool_choice: nil, tools: nil, top_logprobs: nil, top_p: nil, truncation: nil, user: nil, request_options: {})
-
#
-
# @param background [Boolean, nil] Whether to run the model response in the background.
-
#
-
# @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>, nil] Specify additional output data to include in the model response. Currently
-
#
-
# @param input [String, Array<OpenAI::Models::Responses::EasyInputMessage, OpenAI::Models::Responses::ResponseInputItem::Message, OpenAI::Models::Responses::ResponseOutputMessage, OpenAI::Models::Responses::ResponseFileSearchToolCall, OpenAI::Models::Responses::ResponseComputerToolCall, OpenAI::Models::Responses::ResponseInputItem::ComputerCallOutput, OpenAI::Models::Responses::ResponseFunctionWebSearch, OpenAI::Models::Responses::ResponseFunctionToolCall, OpenAI::Models::Responses::ResponseInputItem::FunctionCallOutput, OpenAI::Models::Responses::ResponseReasoningItem, OpenAI::Models::Responses::ResponseInputItem::ImageGenerationCall, OpenAI::Models::Responses::ResponseCodeInterpreterToolCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCall, OpenAI::Models::Responses::ResponseInputItem::LocalShellCallOutput, OpenAI::Models::Responses::ResponseInputItem::McpListTools, OpenAI::Models::Responses::ResponseInputItem::McpApprovalRequest, OpenAI::Models::Responses::ResponseInputItem::McpApprovalResponse, OpenAI::Models::Responses::ResponseInputItem::McpCall, OpenAI::Models::Responses::ResponseInputItem::ItemReference>] Text, image, or file inputs to the model, used to generate a response.
-
#
-
# @param instructions [String, nil] A system (or developer) message inserted into the model's context.
-
#
-
# @param max_output_tokens [Integer, nil] An upper bound for the number of tokens that can be generated for a response, in
-
#
-
# @param max_tool_calls [Integer, nil] The maximum number of total calls to built-in tools that can be processed in a r
-
#
-
# @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
#
-
# @param model [String, Symbol, OpenAI::Models::ChatModel, OpenAI::Models::ResponsesModel::ResponsesOnlyModel] Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI
-
#
-
# @param parallel_tool_calls [Boolean, nil] Whether to allow the model to run tool calls in parallel.
-
#
-
# @param previous_response_id [String, nil] The unique ID of the previous response to the model. Use this to resume streams from a given response.
-
#
-
# @param prompt [OpenAI::Models::Responses::ResponsePrompt, nil] Reference to a prompt template and its variables.
-
#
-
# @param prompt_cache_key [String] Used by OpenAI to cache responses for similar requests to optimize your cache hi
-
#
-
# @param reasoning [OpenAI::Models::Reasoning, nil] **o-series models only**
-
#
-
# @param safety_identifier [String] A stable identifier used to help detect users of your application that may be vi
-
#
-
# @param service_tier [Symbol, OpenAI::Models::Responses::ResponseCreateParams::ServiceTier, nil] Specifies the processing type used for serving the request.
-
#
-
# @param store [Boolean, nil] Whether to store the generated model response for later retrieval via
-
#
-
# @param temperature [Float, nil] What sampling temperature to use, between 0 and 2. Higher values like 0.8 will m
-
#
-
# @param text [OpenAI::Models::Responses::ResponseTextConfig] Configuration options for a text response from the model. Can be plain
-
#
-
# @param tool_choice [Symbol, OpenAI::Models::Responses::ToolChoiceOptions, OpenAI::Models::Responses::ToolChoiceTypes, OpenAI::Models::Responses::ToolChoiceFunction, OpenAI::Models::Responses::ToolChoiceMcp] How the model should select which tool (or tools) to use when generating
-
#
-
# @param tools [Array<OpenAI::Models::Responses::FunctionTool, OpenAI::Models::Responses::FileSearchTool, OpenAI::Models::Responses::ComputerTool, OpenAI::Models::Responses::Tool::Mcp, OpenAI::Models::Responses::Tool::CodeInterpreter, OpenAI::Models::Responses::Tool::ImageGeneration, OpenAI::Models::Responses::Tool::LocalShell, OpenAI::Models::Responses::WebSearchTool>] An array of tools the model may call while generating a response. You
-
#
-
# @param top_logprobs [Integer, nil] An integer between 0 and 20 specifying the number of most likely tokens to
-
#
-
# @param top_p [Float, nil] An alternative to sampling with temperature, called nucleus sampling,
-
#
-
# @param truncation [Symbol, OpenAI::Models::Responses::ResponseCreateParams::Truncation, nil] The truncation strategy to use for the model response.
-
#
-
# @param user [String] This field is being replaced by `safety_identifier` and `prompt_cache_key`. Use
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
-
#
-
# @return [OpenAI::Internal::Stream<OpenAI::Models::Responses::ResponseAudioDeltaEvent, OpenAI::Models::Responses::ResponseAudioDoneEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDeltaEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCompletedEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInProgressEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInterpretingEvent, OpenAI::Models::Responses::ResponseCompletedEvent, OpenAI::Models::Responses::ResponseContentPartAddedEvent, OpenAI::Models::Responses::ResponseContentPartDoneEvent, OpenAI::Models::Responses::ResponseCreatedEvent, OpenAI::Models::Responses::ResponseErrorEvent, OpenAI::Models::Responses::ResponseFileSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseFileSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseFileSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseInProgressEvent, OpenAI::Models::Responses::ResponseFailedEvent, OpenAI::Models::Responses::ResponseIncompleteEvent, OpenAI::Models::Responses::ResponseOutputItemAddedEvent, OpenAI::Models::Responses::ResponseOutputItemDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartAddedEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDoneEvent, OpenAI::Models::Responses::ResponseRefusalDeltaEvent, OpenAI::Models::Responses::ResponseRefusalDoneEvent, OpenAI::Models::Responses::ResponseTextDeltaEvent, OpenAI::Models::Responses::ResponseTextDoneEvent, 
OpenAI::Models::Responses::ResponseWebSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseWebSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseWebSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseImageGenCallCompletedEvent, OpenAI::Models::Responses::ResponseImageGenCallGeneratingEvent, OpenAI::Models::Responses::ResponseImageGenCallInProgressEvent, OpenAI::Models::Responses::ResponseImageGenCallPartialImageEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseMcpCallCompletedEvent, OpenAI::Models::Responses::ResponseMcpCallFailedEvent, OpenAI::Models::Responses::ResponseMcpCallInProgressEvent, OpenAI::Models::Responses::ResponseMcpListToolsCompletedEvent, OpenAI::Models::Responses::ResponseMcpListToolsFailedEvent, OpenAI::Models::Responses::ResponseMcpListToolsInProgressEvent, OpenAI::Models::Responses::ResponseOutputTextAnnotationAddedEvent, OpenAI::Models::Responses::ResponseQueuedEvent, OpenAI::Models::Responses::ResponseReasoningSummaryDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryDoneEvent>]
-
#
-
# @see OpenAI::Models::Responses::ResponseCreateParams
-
1
def stream_raw(params = {})
  parsed, options = OpenAI::Responses::ResponseCreateParams.dump_request(params)
  # An explicit `stream: false` means the caller wanted the blocking API.
  if parsed.fetch(:stream, true)
    parsed[:stream] = true
  else
    raise ArgumentError.new("Please use `#create` for the non-streaming use case.")
  end

  request_arguments = {
    method: :post,
    path: "responses",
    headers: {"accept" => "text/event-stream"},
    body: parsed,
    stream: OpenAI::Internal::Stream,
    model: OpenAI::Responses::ResponseStreamEvent,
    options: options
  }
  @client.request(**request_arguments)
end
-
-
# See {OpenAI::Resources::Responses#retrieve_streaming} for streaming counterpart.
-
#
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseRetrieveParams} for more details.
-
#
-
# Retrieves a model response with the given ID.
-
#
-
# @overload retrieve(response_id, include: nil, starting_after: nil, request_options: {})
-
#
-
# @param response_id [String] The ID of the response to retrieve.
-
#
-
# @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>] Additional fields to include in the response. See the `include`
-
#
-
# @param starting_after [Integer] The sequence number of the event after which to start streaming.
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
-
#
-
# @return [OpenAI::Models::Responses::Response]
-
#
-
# @see OpenAI::Models::Responses::ResponseRetrieveParams
-
1
def retrieve(response_id, params = {})
  parsed, options = OpenAI::Responses::ResponseRetrieveParams.dump_request(params)
  # Streaming retrieval has a dedicated method so the return type stays honest.
  if parsed[:stream]
    raise ArgumentError.new("Please use `#retrieve_streaming` for the streaming use case.")
  end

  request_arguments = {
    method: :get,
    path: ["responses/%1$s", response_id],
    query: parsed,
    model: OpenAI::Responses::Response,
    options: options
  }
  @client.request(**request_arguments)
end
-
-
# See {OpenAI::Resources::Responses#retrieve} for non-streaming counterpart.
-
#
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::Responses::ResponseRetrieveParams} for more details.
-
#
-
# Retrieves a model response with the given ID.
-
#
-
# @overload retrieve_streaming(response_id, include: nil, starting_after: nil, request_options: {})
-
#
-
# @param response_id [String] The ID of the response to retrieve.
-
#
-
# @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>] Additional fields to include in the response. See the `include`
-
#
-
# @param starting_after [Integer] The sequence number of the event after which to start streaming.
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
-
#
-
# @return [OpenAI::Internal::Stream<OpenAI::Models::Responses::ResponseAudioDeltaEvent, OpenAI::Models::Responses::ResponseAudioDoneEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDeltaEvent, OpenAI::Models::Responses::ResponseAudioTranscriptDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDeltaEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCodeDoneEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallCompletedEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInProgressEvent, OpenAI::Models::Responses::ResponseCodeInterpreterCallInterpretingEvent, OpenAI::Models::Responses::ResponseCompletedEvent, OpenAI::Models::Responses::ResponseContentPartAddedEvent, OpenAI::Models::Responses::ResponseContentPartDoneEvent, OpenAI::Models::Responses::ResponseCreatedEvent, OpenAI::Models::Responses::ResponseErrorEvent, OpenAI::Models::Responses::ResponseFileSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseFileSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseFileSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseFunctionCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseInProgressEvent, OpenAI::Models::Responses::ResponseFailedEvent, OpenAI::Models::Responses::ResponseIncompleteEvent, OpenAI::Models::Responses::ResponseOutputItemAddedEvent, OpenAI::Models::Responses::ResponseOutputItemDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartAddedEvent, OpenAI::Models::Responses::ResponseReasoningSummaryPartDoneEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryTextDoneEvent, OpenAI::Models::Responses::ResponseRefusalDeltaEvent, OpenAI::Models::Responses::ResponseRefusalDoneEvent, OpenAI::Models::Responses::ResponseTextDeltaEvent, OpenAI::Models::Responses::ResponseTextDoneEvent, 
OpenAI::Models::Responses::ResponseWebSearchCallCompletedEvent, OpenAI::Models::Responses::ResponseWebSearchCallInProgressEvent, OpenAI::Models::Responses::ResponseWebSearchCallSearchingEvent, OpenAI::Models::Responses::ResponseImageGenCallCompletedEvent, OpenAI::Models::Responses::ResponseImageGenCallGeneratingEvent, OpenAI::Models::Responses::ResponseImageGenCallInProgressEvent, OpenAI::Models::Responses::ResponseImageGenCallPartialImageEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDeltaEvent, OpenAI::Models::Responses::ResponseMcpCallArgumentsDoneEvent, OpenAI::Models::Responses::ResponseMcpCallCompletedEvent, OpenAI::Models::Responses::ResponseMcpCallFailedEvent, OpenAI::Models::Responses::ResponseMcpCallInProgressEvent, OpenAI::Models::Responses::ResponseMcpListToolsCompletedEvent, OpenAI::Models::Responses::ResponseMcpListToolsFailedEvent, OpenAI::Models::Responses::ResponseMcpListToolsInProgressEvent, OpenAI::Models::Responses::ResponseOutputTextAnnotationAddedEvent, OpenAI::Models::Responses::ResponseQueuedEvent, OpenAI::Models::Responses::ResponseReasoningSummaryDeltaEvent, OpenAI::Models::Responses::ResponseReasoningSummaryDoneEvent>]
-
#
-
# @see OpenAI::Models::Responses::ResponseRetrieveParams
-
1
def retrieve_streaming(response_id, params = {})
  parsed, options = OpenAI::Responses::ResponseRetrieveParams.dump_request(params)
  # An explicit `stream: false` means the caller wanted the blocking API.
  if parsed.fetch(:stream, true)
    parsed[:stream] = true
  else
    raise ArgumentError.new("Please use `#retrieve` for the non-streaming use case.")
  end

  request_arguments = {
    method: :get,
    path: ["responses/%1$s", response_id],
    query: parsed,
    headers: {"accept" => "text/event-stream"},
    stream: OpenAI::Internal::Stream,
    model: OpenAI::Responses::ResponseStreamEvent,
    options: options
  }
  @client.request(**request_arguments)
end
-
-
1
# @api private
#
# Opens the SSE stream for an existing response; used when resuming a
# stream from a previous response ID. `unwrap` post-processes each event.
private def retrieve_streaming_internal(response_id, params:, unwrap:)
  parsed, options = OpenAI::Responses::ResponseRetrieveParams.dump_request(params)
  parsed[:stream] = true
  request_arguments = {
    method: :get,
    path: ["responses/%1$s", response_id],
    query: parsed,
    headers: {"accept" => "text/event-stream"},
    stream: OpenAI::Internal::Stream,
    model: OpenAI::Responses::ResponseStreamEvent,
    options: options,
    unwrap: unwrap
  }
  @client.request(**request_arguments)
end
-
-
# Deletes a model response with the given ID.
-
#
-
# @overload delete(response_id, request_options: {})
-
#
-
# @param response_id [String] The ID of the response to delete.
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
-
#
-
# @return [nil]
-
#
-
# @see OpenAI::Models::Responses::ResponseDeleteParams
-
1
def delete(response_id, params = {})
  # The endpoint returns no payload, so the client decodes into NilClass.
  request_arguments = {
    method: :delete,
    path: ["responses/%1$s", response_id],
    model: NilClass,
    options: params[:request_options]
  }
  @client.request(**request_arguments)
end
-
-
# Cancels a model response with the given ID. Only responses created with the
-
# `background` parameter set to `true` can be cancelled.
-
# [Learn more](https://platform.openai.com/docs/guides/background).
-
#
-
# @overload cancel(response_id, request_options: {})
-
#
-
# @param response_id [String] The ID of the response to cancel.
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
-
#
-
# @return [OpenAI::Models::Responses::Response]
-
#
-
# @see OpenAI::Models::Responses::ResponseCancelParams
-
1
def cancel(response_id, params = {})
  # Cancellation is a POST to the /cancel sub-resource of the response.
  request_arguments = {
    method: :post,
    path: ["responses/%1$s/cancel", response_id],
    model: OpenAI::Responses::Response,
    options: params[:request_options]
  }
  @client.request(**request_arguments)
end
-
-
# @api private
#
# Wires the resource to its client and builds the nested input_items
# sub-resource (exposed via attr_reader :input_items) on the same client.
#
# @param client [OpenAI::Client]
def initialize(client:)
  @client = client
  @input_items = OpenAI::Resources::Responses::InputItems.new(client: client)
end
-
-
1
private
-
-
      # Post-processes raw API responses to parse and coerce structured outputs into typed Ruby objects.
      #
      # This method enhances the raw API response by parsing JSON content in structured outputs
      # (both text outputs and function/tool calls) and converting them to their corresponding
      # Ruby types using the JsonSchemaConverter models identified during request preparation.
      #
      # @param raw [Hash] The raw API response hash that will be mutated with parsed data
      # @param model [JsonSchemaConverter|nil] The converter for structured text output, if specified
      # @param tool_models [Hash<String, JsonSchemaConverter>] Hash mapping tool names to their converters
      # @return [Hash] The mutated raw response with added :parsed fields containing typed Ruby objects
      #
      # The method performs two main transformations:
      # 1. For structured text outputs: Finds output_text content, parses the JSON, and coerces it
      #    to the model type, adding the result as content[:parsed]
      # 2. For function/tool calls: Looks up the tool's converter by name, parses the arguments JSON,
      #    and coerces it to the appropriate type, adding the result as output[:parsed]
      def parse_structured_outputs!(raw, model, tool_models)
        # Pass 1: only runs when the caller requested a structured text output.
        if model.is_a?(OpenAI::StructuredOutput::JsonSchemaConverter)
          raw[:output]
            &.flat_map do |output|
              # Only "message" outputs carry text content; everything else contributes nothing.
              next [] unless output[:type] == "message"
              output[:content].to_a
            end
            &.each do |content|
              next unless content[:type] == "output_text"
              begin
                parsed = JSON.parse(content.fetch(:text), symbolize_names: true)
              rescue JSON::ParserError => e
                # Malformed JSON is not raised: the exception object itself is fed to the
                # coercer, so callers can discover the failure via content[:parsed].
                parsed = e
              end
              coerced = OpenAI::Internal::Type::Converter.coerce(model, parsed)
              content.store(:parsed, coerced)
            end
        end

        # Pass 2: coerce tool-call arguments for any tool with a registered converter.
        raw[:output]&.each do |output|
          next unless output[:type] == "function_call"
          # NB: this deliberately reassigns (clobbers) the `model` local; the method
          # parameter is no longer needed once pass 1 has run.
          next if (model = tool_models[output.fetch(:name)]).nil?
          begin
            parsed = JSON.parse(output.fetch(:arguments), symbolize_names: true)
          rescue JSON::ParserError => e
            # Same exception-as-value convention as above.
            parsed = e
          end
          coerced = OpenAI::Internal::Type::Converter.coerce(model, parsed)
          output.store(:parsed, coerced)
        end

        # Return the (mutated) input hash for convenient chaining.
        raw
      end
-
-
      # Extracts structured output models from request parameters and converts them to JSON Schema format.
      #
      # This method processes the parsed request parameters to identify any JsonSchemaConverter instances
      # that define expected output schemas. It transforms these Ruby schema definitions into the JSON
      # Schema format required by the OpenAI API, enabling type-safe structured outputs.
      #
      # @param parsed [Hash] The parsed request parameters that may contain structured output definitions
      # @return [Array<(JsonSchemaConverter|nil, Hash)>] A tuple containing:
      #   - model: The JsonSchemaConverter for structured text output (or nil if not specified)
      #   - tool_models: Hash mapping tool names to their JsonSchemaConverter models
      #
      # The method handles multiple ways structured outputs can be specified:
      # - Direct text format: { text: JsonSchemaConverter }
      # - Nested text format: { text: { format: JsonSchemaConverter } }
      # - Deep nested format: { text: { format: { type: :json_schema, schema: JsonSchemaConverter } } }
      # - Tool parameters: { tools: [JsonSchemaConverter, ...] } or tools with parameters as converters
      def get_structured_output_models(parsed)
        model = nil
        tool_models = {}

        # Each `in` pattern that matches binds the converter into the pre-declared
        # `model` local; branch order matters (most specific text shapes first).
        case parsed
        in {text: OpenAI::StructuredOutput::JsonSchemaConverter => model}
          # Replace the bare converter with the fully-expanded text.format payload.
          parsed.update(
            text: {
              format: {
                type: :json_schema,
                strict: true,
                name: model.name.split("::").last,
                schema: model.to_json_schema
              }
            }
          )
        in {text: {format: OpenAI::StructuredOutput::JsonSchemaConverter => model}}
          # Converter nested one level down: expand in place inside text.
          parsed.fetch(:text).update(
            format: {
              type: :json_schema,
              strict: true,
              name: model.name.split("::").last,
              schema: model.to_json_schema
            }
          )
        in {text: {format: {type: :json_schema,
                            schema: OpenAI::StructuredOutput::JsonSchemaConverter => model}}}
          # Caller already built the envelope; only the schema needs converting.
          parsed.dig(:text, :format).store(:schema, model.to_json_schema)
        in {tools: Array => tools}
          # rubocop:disable Metrics/BlockLength
          mapped = tools.map do |tool|
            case tool
            in OpenAI::StructuredOutput::JsonSchemaConverter
              # A bare converter becomes a full strict function-tool definition.
              name = tool.name.split("::").last
              tool_models.store(name, tool)
              {
                type: :function,
                strict: true,
                name: name,
                parameters: tool.to_json_schema
              }
            in {type: :function, parameters: OpenAI::StructuredOutput::JsonSchemaConverter => params}
              # NOTE(review): this pattern matches a *flat* tool hash (top-level
              # :parameters), yet the body fetches a nested :function key — that
              # raises KeyError unless the tool also carries a :function hash.
              # Looks inconsistent with the flat Responses tool shape; confirm intended.
              func = tool.fetch(:function)
              name = func[:name] ||= params.name.split("::").last
              tool_models.store(name, params)
              func.update(parameters: params.to_json_schema)
              tool
            in {type: _, function: {parameters: OpenAI::StructuredOutput::JsonSchemaConverter => params, **}}
              # Chat-completions-style nested function tool with a converter as parameters.
              name = tool[:function][:name] || params.name.split("::").last
              tool_models.store(name, params)
              tool[:function][:parameters] = params.to_json_schema
              tool
            in {type: _, function: Hash => func} if func[:parameters].is_a?(Class) && func[:parameters] < OpenAI::Internal::Type::BaseModel
              # Nested function tool whose parameters are a BaseModel subclass (not a converter).
              params = func[:parameters]
              name = func[:name] || params.name.split("::").last
              tool_models.store(name, params)
              func[:parameters] = params.to_json_schema
              tool
            else
              # Anything else (plain hashes, built-in tools) passes through untouched.
              tool
            end
          end
          # rubocop:enable Metrics/BlockLength
          # Mutate the caller's array in place so `parsed` reflects the conversions.
          tools.replace(mapped)
        else
          # No structured output declared anywhere in the params; nothing to do.
        end

        [model, tool_models]
      end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Resources
    class Responses
      class InputItems
        # Returns a list of input items for a given response.
        #
        # Some parameter documentation has been truncated; see
        # {OpenAI::Models::Responses::InputItemListParams} for more details.
        #
        # @overload list(response_id, after: nil, before: nil, include: nil, limit: nil, order: nil, request_options: {})
        #
        # @param response_id [String] The ID of the response to retrieve input items for.
        # @param after [String] An item ID to list items after, used in pagination.
        # @param before [String] An item ID to list items before, used in pagination.
        # @param include [Array<Symbol, OpenAI::Models::Responses::ResponseIncludable>] Additional fields to include in the response.
        # @param limit [Integer] A limit on the number of objects to be returned.
        # @param order [Symbol, OpenAI::Models::Responses::InputItemListParams::Order] The order to return the input items in. Default is `desc`.
        # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
        #
        # @return [OpenAI::Internal::CursorPage<OpenAI::Models::Responses::ResponseItem>] cursor page of input/output items
        #
        # @see OpenAI::Models::Responses::InputItemListParams
        def list(response_id, params = {})
          query, opts = OpenAI::Responses::InputItemListParams.dump_request(params)
          request_args = {
            method: :get,
            path: ["responses/%1$s/input_items", response_id],
            query: query,
            page: OpenAI::Internal::CursorPage,
            model: OpenAI::Responses::ResponseItem,
            options: opts
          }
          @client.request(**request_args)
        end

        # @api private
        #
        # @param client [OpenAI::Client]
        def initialize(client:)
          @client = client
        end
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Resources
    class Uploads
      # Sub-resource for adding Parts to an Upload.
      #
      # @return [OpenAI::Resources::Uploads::Parts]
      attr_reader :parts

      # Creates an intermediate
      # [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object
      # that you can add
      # [Parts](https://platform.openai.com/docs/api-reference/uploads/part-object) to.
      # An Upload can accept at most 8 GB in total and expires an hour after creation.
      #
      # Once completed, a regular
      # [File](https://platform.openai.com/docs/api-reference/files/object) object is
      # created from the uploaded parts, usable across the platform.
      #
      # For certain `purpose` values, the correct `mime_type` must be specified; see the
      # [supported MIME types for your use case](https://platform.openai.com/docs/assistants/tools/file-search#supported-files)
      # and the guidance on
      # [creating a File](https://platform.openai.com/docs/api-reference/files/create).
      #
      # Some parameter documentation has been truncated; see
      # {OpenAI::Models::UploadCreateParams} for more details.
      #
      # @overload create(bytes:, filename:, mime_type:, purpose:, request_options: {})
      #
      # @param bytes [Integer] The number of bytes in the file you are uploading.
      # @param filename [String] The name of the file to upload.
      # @param mime_type [String] The MIME type of the file.
      # @param purpose [Symbol, OpenAI::Models::FilePurpose] The intended purpose of the uploaded file.
      # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
      #
      # @return [OpenAI::Models::Upload]
      #
      # @see OpenAI::Models::UploadCreateParams
      def create(params)
        body, opts = OpenAI::UploadCreateParams.dump_request(params)
        @client.request(method: :post, path: "uploads", body: body, model: OpenAI::Upload, options: opts)
      end

      # Cancels the Upload. No Parts may be added after an Upload is cancelled.
      #
      # Some parameter documentation has been truncated; see
      # {OpenAI::Models::UploadCancelParams} for more details.
      #
      # @overload cancel(upload_id, request_options: {})
      #
      # @param upload_id [String] The ID of the Upload.
      # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
      #
      # @return [OpenAI::Models::Upload]
      #
      # @see OpenAI::Models::UploadCancelParams
      def cancel(upload_id, params = {})
        # No body is sent for a cancel; only per-request options apply.
        @client.request(
          method: :post,
          path: ["uploads/%1$s/cancel", upload_id],
          model: OpenAI::Upload,
          options: params[:request_options]
        )
      end

      # Completes the
      # [Upload](https://platform.openai.com/docs/api-reference/uploads/object),
      # producing a nested [File](https://platform.openai.com/docs/api-reference/files/object)
      # ready to use in the rest of the platform. Part order is taken from `part_ids`,
      # and the total uploaded bytes must match the `bytes` declared at creation.
      # No Parts may be added after an Upload is completed.
      #
      # Some parameter documentation has been truncated; see
      # {OpenAI::Models::UploadCompleteParams} for more details.
      #
      # @overload complete(upload_id, part_ids:, md5: nil, request_options: {})
      #
      # @param upload_id [String] The ID of the Upload.
      # @param part_ids [Array<String>] The ordered list of Part IDs.
      # @param md5 [String] Optional md5 checksum to verify the uploaded bytes.
      # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
      #
      # @return [OpenAI::Models::Upload]
      #
      # @see OpenAI::Models::UploadCompleteParams
      def complete(upload_id, params)
        body, opts = OpenAI::UploadCompleteParams.dump_request(params)
        request_args = {
          method: :post,
          path: ["uploads/%1$s/complete", upload_id],
          body: body,
          model: OpenAI::Upload,
          options: opts
        }
        @client.request(**request_args)
      end

      # @api private
      #
      # @param client [OpenAI::Client]
      def initialize(client:)
        @client = client
        @parts = OpenAI::Resources::Uploads::Parts.new(client: client)
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Resources
    class Uploads
      class Parts
        # Adds a
        # [Part](https://platform.openai.com/docs/api-reference/uploads/part-object) to an
        # [Upload](https://platform.openai.com/docs/api-reference/uploads/object) object.
        # A Part represents a chunk of bytes from the file you are trying to upload.
        #
        # Each Part can be at most 64 MB, and Parts may be added until the Upload's
        # 8 GB maximum is reached. Parts can be uploaded in parallel; their intended
        # order is decided when you
        # [complete the Upload](https://platform.openai.com/docs/api-reference/uploads/complete).
        #
        # Some parameter documentation has been truncated; see
        # {OpenAI::Models::Uploads::PartCreateParams} for more details.
        #
        # @overload create(upload_id, data:, request_options: {})
        #
        # @param upload_id [String] The ID of the Upload.
        # @param data [Pathname, StringIO, IO, String, OpenAI::FilePart] The chunk of bytes for this Part.
        # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
        #
        # @return [OpenAI::Models::Uploads::UploadPart]
        #
        # @see OpenAI::Models::Uploads::PartCreateParams
        def create(upload_id, params)
          body, opts = OpenAI::Uploads::PartCreateParams.dump_request(params)
          # Part payloads are raw bytes, so this endpoint uses multipart encoding.
          @client.request(
            method: :post,
            path: ["uploads/%1$s/parts", upload_id],
            headers: {"content-type" => "multipart/form-data"},
            body: body,
            model: OpenAI::Uploads::UploadPart,
            options: opts
          )
        end

        # @api private
        #
        # @param client [OpenAI::Client]
        def initialize(client:)
          @client = client
        end
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Resources
    class VectorStores
      # Sub-resource for files attached to vector stores.
      #
      # @return [OpenAI::Resources::VectorStores::Files]
      attr_reader :files

      # Sub-resource for batched file operations on vector stores.
      #
      # @return [OpenAI::Resources::VectorStores::FileBatches]
      attr_reader :file_batches

      # Create a vector store.
      #
      # Some parameter documentation has been truncated; see
      # {OpenAI::Models::VectorStoreCreateParams} for more details.
      #
      # @overload create(chunking_strategy: nil, expires_after: nil, file_ids: nil, metadata: nil, name: nil, request_options: {})
      #
      # @param chunking_strategy [OpenAI::Models::AutoFileChunkingStrategyParam, OpenAI::Models::StaticFileChunkingStrategyObjectParam] Chunking strategy; defaults to `auto` when unset.
      # @param expires_after [OpenAI::Models::VectorStoreCreateParams::ExpiresAfter] The expiration policy for a vector store.
      # @param file_ids [Array<String>] A list of [File](https://platform.openai.com/docs/api-reference/files) IDs.
      # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object.
      # @param name [String] The name of the vector store.
      # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
      #
      # @return [OpenAI::Models::VectorStore]
      #
      # @see OpenAI::Models::VectorStoreCreateParams
      def create(params = {})
        body, opts = OpenAI::VectorStoreCreateParams.dump_request(params)
        @client.request(
          method: :post,
          path: "vector_stores",
          body: body,
          model: OpenAI::VectorStore,
          options: beta_options(opts)
        )
      end

      # Retrieves a vector store.
      #
      # @overload retrieve(vector_store_id, request_options: {})
      #
      # @param vector_store_id [String] The ID of the vector store to retrieve.
      # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
      #
      # @return [OpenAI::Models::VectorStore]
      #
      # @see OpenAI::Models::VectorStoreRetrieveParams
      def retrieve(vector_store_id, params = {})
        @client.request(
          method: :get,
          path: ["vector_stores/%1$s", vector_store_id],
          model: OpenAI::VectorStore,
          options: beta_options(params[:request_options].to_h)
        )
      end

      # Modifies a vector store.
      #
      # Some parameter documentation has been truncated; see
      # {OpenAI::Models::VectorStoreUpdateParams} for more details.
      #
      # @overload update(vector_store_id, expires_after: nil, metadata: nil, name: nil, request_options: {})
      #
      # @param vector_store_id [String] The ID of the vector store to modify.
      # @param expires_after [OpenAI::Models::VectorStoreUpdateParams::ExpiresAfter, nil] The expiration policy for a vector store.
      # @param metadata [Hash{Symbol=>String}, nil] Set of 16 key-value pairs that can be attached to an object.
      # @param name [String, nil] The name of the vector store.
      # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
      #
      # @return [OpenAI::Models::VectorStore]
      #
      # @see OpenAI::Models::VectorStoreUpdateParams
      def update(vector_store_id, params = {})
        body, opts = OpenAI::VectorStoreUpdateParams.dump_request(params)
        @client.request(
          method: :post,
          path: ["vector_stores/%1$s", vector_store_id],
          body: body,
          model: OpenAI::VectorStore,
          options: beta_options(opts)
        )
      end

      # Returns a list of vector stores.
      #
      # Some parameter documentation has been truncated; see
      # {OpenAI::Models::VectorStoreListParams} for more details.
      #
      # @overload list(after: nil, before: nil, limit: nil, order: nil, request_options: {})
      #
      # @param after [String] A cursor for use in pagination (list after this object ID).
      # @param before [String] A cursor for use in pagination (list before this object ID).
      # @param limit [Integer] A limit on the number of objects to be returned.
      # @param order [Symbol, OpenAI::Models::VectorStoreListParams::Order] Sort order by the `created_at` timestamp of the objects.
      # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
      #
      # @return [OpenAI::Internal::CursorPage<OpenAI::Models::VectorStore>]
      #
      # @see OpenAI::Models::VectorStoreListParams
      def list(params = {})
        query, opts = OpenAI::VectorStoreListParams.dump_request(params)
        @client.request(
          method: :get,
          path: "vector_stores",
          query: query,
          page: OpenAI::Internal::CursorPage,
          model: OpenAI::VectorStore,
          options: beta_options(opts)
        )
      end

      # Delete a vector store.
      #
      # @overload delete(vector_store_id, request_options: {})
      #
      # @param vector_store_id [String] The ID of the vector store to delete.
      # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
      #
      # @return [OpenAI::Models::VectorStoreDeleted]
      #
      # @see OpenAI::Models::VectorStoreDeleteParams
      def delete(vector_store_id, params = {})
        @client.request(
          method: :delete,
          path: ["vector_stores/%1$s", vector_store_id],
          model: OpenAI::VectorStoreDeleted,
          options: beta_options(params[:request_options].to_h)
        )
      end

      # Search a vector store for relevant chunks based on a query and file attributes
      # filter.
      #
      # Some parameter documentation has been truncated; see
      # {OpenAI::Models::VectorStoreSearchParams} for more details.
      #
      # @overload search(vector_store_id, query:, filters: nil, max_num_results: nil, ranking_options: nil, rewrite_query: nil, request_options: {})
      #
      # @param vector_store_id [String] The ID of the vector store to search.
      # @param query [String, Array<String>] A query string for a search.
      # @param filters [OpenAI::Models::ComparisonFilter, OpenAI::Models::CompoundFilter] A filter to apply based on file attributes.
      # @param max_num_results [Integer] The maximum number of results to return.
      # @param ranking_options [OpenAI::Models::VectorStoreSearchParams::RankingOptions] Ranking options for search.
      # @param rewrite_query [Boolean] Whether to rewrite the natural language query for vector search.
      # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
      #
      # @return [OpenAI::Internal::Page<OpenAI::Models::VectorStoreSearchResponse>]
      #
      # @see OpenAI::Models::VectorStoreSearchParams
      def search(vector_store_id, params)
        body, opts = OpenAI::VectorStoreSearchParams.dump_request(params)
        @client.request(
          method: :post,
          path: ["vector_stores/%1$s/search", vector_store_id],
          body: body,
          page: OpenAI::Internal::Page,
          model: OpenAI::Models::VectorStoreSearchResponse,
          options: beta_options(opts)
        )
      end

      # @api private
      #
      # @param client [OpenAI::Client]
      def initialize(client:)
        @client = client
        @files = OpenAI::Resources::VectorStores::Files.new(client: client)
        @file_batches = OpenAI::Resources::VectorStores::FileBatches.new(client: client)
      end

      # Merges the assistants beta header into per-request options.
      #
      # @param opts [Hash] request options to merge over the beta header
      # @return [Hash]
      private def beta_options(opts)
        {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **opts}
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  module Resources
    class VectorStores
      class FileBatches
        # Create a vector store file batch.
        #
        # Some parameter documentation has been truncated; see
        # {OpenAI::Models::VectorStores::FileBatchCreateParams} for more details.
        #
        # @overload create(vector_store_id, file_ids:, attributes: nil, chunking_strategy: nil, request_options: {})
        #
        # @param vector_store_id [String] The ID of the vector store for which to create a File Batch.
        # @param file_ids [Array<String>] A list of [File](https://platform.openai.com/docs/api-reference/files) IDs.
        # @param attributes [Hash{Symbol=>String, Float, Boolean}, nil] Set of 16 key-value pairs that can be attached to an object.
        # @param chunking_strategy [OpenAI::Models::AutoFileChunkingStrategyParam, OpenAI::Models::StaticFileChunkingStrategyObjectParam] Chunking strategy; defaults to `auto` when unset.
        # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
        #
        # @return [OpenAI::Models::VectorStores::VectorStoreFileBatch]
        #
        # @see OpenAI::Models::VectorStores::FileBatchCreateParams
        def create(vector_store_id, params)
          body, opts = OpenAI::VectorStores::FileBatchCreateParams.dump_request(params)
          @client.request(
            method: :post,
            path: ["vector_stores/%1$s/file_batches", vector_store_id],
            body: body,
            model: OpenAI::VectorStores::VectorStoreFileBatch,
            options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **opts}
          )
        end

        # Retrieves a vector store file batch.
        #
        # @overload retrieve(batch_id, vector_store_id:, request_options: {})
        #
        # @param batch_id [String] The ID of the file batch being retrieved.
        # @param vector_store_id [String] The ID of the vector store that the file batch belongs to.
        # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
        #
        # @return [OpenAI::Models::VectorStores::VectorStoreFileBatch]
        #
        # @see OpenAI::Models::VectorStores::FileBatchRetrieveParams
        def retrieve(batch_id, params)
          body, opts = OpenAI::VectorStores::FileBatchRetrieveParams.dump_request(params)
          # The vector store ID rides in as a keyword param; pull it out for the path.
          store_id =
            body.delete(:vector_store_id) do |key|
              raise ArgumentError.new("missing required path argument #{key}")
            end
          @client.request(
            method: :get,
            path: ["vector_stores/%1$s/file_batches/%2$s", store_id, batch_id],
            model: OpenAI::VectorStores::VectorStoreFileBatch,
            options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **opts}
          )
        end

        # Cancel a vector store file batch. This attempts to cancel the processing of
        # files in this batch as soon as possible.
        #
        # @overload cancel(batch_id, vector_store_id:, request_options: {})
        #
        # @param batch_id [String] The ID of the file batch to cancel.
        # @param vector_store_id [String] The ID of the vector store that the file batch belongs to.
        # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
        #
        # @return [OpenAI::Models::VectorStores::VectorStoreFileBatch]
        #
        # @see OpenAI::Models::VectorStores::FileBatchCancelParams
        def cancel(batch_id, params)
          body, opts = OpenAI::VectorStores::FileBatchCancelParams.dump_request(params)
          store_id =
            body.delete(:vector_store_id) do |key|
              raise ArgumentError.new("missing required path argument #{key}")
            end
          @client.request(
            method: :post,
            path: ["vector_stores/%1$s/file_batches/%2$s/cancel", store_id, batch_id],
            model: OpenAI::VectorStores::VectorStoreFileBatch,
            options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **opts}
          )
        end

        # Returns a list of vector store files in a batch.
        #
        # Some parameter documentation has been truncated; see
        # {OpenAI::Models::VectorStores::FileBatchListFilesParams} for more details.
        #
        # @overload list_files(batch_id, vector_store_id:, after: nil, before: nil, filter: nil, limit: nil, order: nil, request_options: {})
        #
        # @param batch_id [String] Path param: The ID of the file batch that the files belong to.
        # @param vector_store_id [String] Path param: The ID of the vector store that the files belong to.
        # @param after [String] Query param: A cursor for use in pagination (after this object ID).
        # @param before [String] Query param: A cursor for use in pagination (before this object ID).
        # @param filter [Symbol, OpenAI::Models::VectorStores::FileBatchListFilesParams::Filter] Query param: Filter by file status.
        # @param limit [Integer] Query param: A limit on the number of objects to be returned.
        # @param order [Symbol, OpenAI::Models::VectorStores::FileBatchListFilesParams::Order] Query param: Sort order by the `created_at` timestamp.
        # @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
        #
        # @return [OpenAI::Internal::CursorPage<OpenAI::Models::VectorStores::VectorStoreFile>]
        #
        # @see OpenAI::Models::VectorStores::FileBatchListFilesParams
        def list_files(batch_id, params)
          query, opts = OpenAI::VectorStores::FileBatchListFilesParams.dump_request(params)
          store_id =
            query.delete(:vector_store_id) do |key|
              raise ArgumentError.new("missing required path argument #{key}")
            end
          @client.request(
            method: :get,
            path: ["vector_stores/%1$s/file_batches/%2$s/files", store_id, batch_id],
            query: query,
            page: OpenAI::Internal::CursorPage,
            model: OpenAI::VectorStores::VectorStoreFile,
            options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **opts}
          )
        end

        # @api private
        #
        # @param client [OpenAI::Client]
        def initialize(client:)
          @client = client
        end
      end
    end
  end
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
-
1
module Resources
-
1
class VectorStores
-
1
class Files
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::VectorStores::FileCreateParams} for more details.
-
#
-
# Create a vector store file by attaching a
-
# [File](https://platform.openai.com/docs/api-reference/files) to a
-
# [vector store](https://platform.openai.com/docs/api-reference/vector-stores/object).
-
#
-
# @overload create(vector_store_id, file_id:, attributes: nil, chunking_strategy: nil, request_options: {})
-
#
-
# @param vector_store_id [String] The ID of the vector store for which to create a File.
-
#
-
# @param file_id [String] A [File](https://platform.openai.com/docs/api-reference/files) ID that the vecto
-
#
-
# @param attributes [Hash{Symbol=>String, Float, Boolean}, nil] Set of 16 key-value pairs that can be attached to an object. This can be
-
#
-
# @param chunking_strategy [OpenAI::Models::AutoFileChunkingStrategyParam, OpenAI::Models::StaticFileChunkingStrategyObjectParam] The chunking strategy used to chunk the file(s). If not set, will use the `auto`
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
-
#
-
# @return [OpenAI::Models::VectorStores::VectorStoreFile]
-
#
-
# @see OpenAI::Models::VectorStores::FileCreateParams
-
1
def create(vector_store_id, params)
-
parsed, options = OpenAI::VectorStores::FileCreateParams.dump_request(params)
-
@client.request(
-
method: :post,
-
path: ["vector_stores/%1$s/files", vector_store_id],
-
body: parsed,
-
model: OpenAI::VectorStores::VectorStoreFile,
-
options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options}
-
)
-
end
-
-
# Retrieves a vector store file.
-
#
-
# @overload retrieve(file_id, vector_store_id:, request_options: {})
-
#
-
# @param file_id [String] The ID of the file being retrieved.
-
#
-
# @param vector_store_id [String] The ID of the vector store that the file belongs to.
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
-
#
-
# @return [OpenAI::Models::VectorStores::VectorStoreFile]
-
#
-
# @see OpenAI::Models::VectorStores::FileRetrieveParams
-
1
def retrieve(file_id, params)
-
parsed, options = OpenAI::VectorStores::FileRetrieveParams.dump_request(params)
-
vector_store_id =
-
parsed.delete(:vector_store_id) do
-
raise ArgumentError.new("missing required path argument #{_1}")
-
end
-
@client.request(
-
method: :get,
-
path: ["vector_stores/%1$s/files/%2$s", vector_store_id, file_id],
-
model: OpenAI::VectorStores::VectorStoreFile,
-
options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options}
-
)
-
end
-
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::VectorStores::FileUpdateParams} for more details.
-
#
-
# Update attributes on a vector store file.
-
#
-
# @overload update(file_id, vector_store_id:, attributes:, request_options: {})
-
#
-
# @param file_id [String] Path param: The ID of the file to update attributes.
-
#
-
# @param vector_store_id [String] Path param: The ID of the vector store the file belongs to.
-
#
-
# @param attributes [Hash{Symbol=>String, Float, Boolean}, nil] Body param: Set of 16 key-value pairs that can be attached to an object. This ca
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
-
#
-
# @return [OpenAI::Models::VectorStores::VectorStoreFile]
-
#
-
# @see OpenAI::Models::VectorStores::FileUpdateParams
-
1
def update(file_id, params)
-
parsed, options = OpenAI::VectorStores::FileUpdateParams.dump_request(params)
-
vector_store_id =
-
parsed.delete(:vector_store_id) do
-
raise ArgumentError.new("missing required path argument #{_1}")
-
end
-
@client.request(
-
method: :post,
-
path: ["vector_stores/%1$s/files/%2$s", vector_store_id, file_id],
-
body: parsed,
-
model: OpenAI::VectorStores::VectorStoreFile,
-
options: {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options}
-
)
-
end
-
-
# Some parameter documentations has been truncated, see
-
# {OpenAI::Models::VectorStores::FileListParams} for more details.
-
#
-
# Returns a list of vector store files.
-
#
-
# @overload list(vector_store_id, after: nil, before: nil, filter: nil, limit: nil, order: nil, request_options: {})
-
#
-
# @param vector_store_id [String] The ID of the vector store that the files belong to.
-
#
-
# @param after [String] A cursor for use in pagination. `after` is an object ID that defines your place
-
#
-
# @param before [String] A cursor for use in pagination. `before` is an object ID that defines your place
-
#
-
# @param filter [Symbol, OpenAI::Models::VectorStores::FileListParams::Filter] Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`.
-
#
-
# @param limit [Integer] A limit on the number of objects to be returned. Limit can range between 1 and 1
-
#
-
# @param order [Symbol, OpenAI::Models::VectorStores::FileListParams::Order] Sort order by the `created_at` timestamp of the objects. `asc` for ascending ord
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
-
#
-
# @return [OpenAI::Internal::CursorPage<OpenAI::Models::VectorStores::VectorStoreFile>]
-
#
-
# @see OpenAI::Models::VectorStores::FileListParams
-
1
# Returns a list of vector store files.
#
# @param vector_store_id [String] The ID of the vector store that the files belong to.
# @param params [Hash] Optional pagination/filter parameters
#   (:after, :before, :filter, :limit, :order, :request_options).
#
# @return [OpenAI::Internal::CursorPage<OpenAI::Models::VectorStores::VectorStoreFile>]
#
# @see OpenAI::Models::VectorStores::FileListParams
def list(vector_store_id, params = {})
  parsed, options = OpenAI::VectorStores::FileListParams.dump_request(params)
  request_options = {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options}
  @client.request(
    method: :get,
    path: ["vector_stores/%1$s/files", vector_store_id],
    query: parsed,
    page: OpenAI::Internal::CursorPage,
    model: OpenAI::VectorStores::VectorStoreFile,
    options: request_options
  )
end
-
-
# Delete a vector store file. This will remove the file from the vector store but
-
# the file itself will not be deleted. To delete the file, use the
-
# [delete file](https://platform.openai.com/docs/api-reference/files/delete)
-
# endpoint.
-
#
-
# @overload delete(file_id, vector_store_id:, request_options: {})
-
#
-
# @param file_id [String] The ID of the file to delete.
-
#
-
# @param vector_store_id [String] The ID of the vector store that the file belongs to.
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
-
#
-
# @return [OpenAI::Models::VectorStores::VectorStoreFileDeleted]
-
#
-
# @see OpenAI::Models::VectorStores::FileDeleteParams
-
1
# Delete a vector store file. This will remove the file from the vector store but
# the file itself will not be deleted. To delete the file, use the
# [delete file](https://platform.openai.com/docs/api-reference/files/delete)
# endpoint.
#
# @param file_id [String] The ID of the file to delete.
# @param params [Hash] Request parameters; must include :vector_store_id.
#
# @return [OpenAI::Models::VectorStores::VectorStoreFileDeleted]
#
# @see OpenAI::Models::VectorStores::FileDeleteParams
def delete(file_id, params)
  parsed, options = OpenAI::VectorStores::FileDeleteParams.dump_request(params)
  # The vector store ID belongs in the URL path rather than the body.
  vector_store_id =
    parsed.delete(:vector_store_id) do |key|
      raise ArgumentError, "missing required path argument #{key}"
    end
  request_options = {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options}
  @client.request(
    method: :delete,
    path: ["vector_stores/%1$s/files/%2$s", vector_store_id, file_id],
    model: OpenAI::VectorStores::VectorStoreFileDeleted,
    options: request_options
  )
end
-
-
# Retrieve the parsed contents of a vector store file.
-
#
-
# @overload content(file_id, vector_store_id:, request_options: {})
-
#
-
# @param file_id [String] The ID of the file within the vector store.
-
#
-
# @param vector_store_id [String] The ID of the vector store.
-
#
-
# @param request_options [OpenAI::RequestOptions, Hash{Symbol=>Object}, nil]
-
#
-
# @return [OpenAI::Internal::Page<OpenAI::Models::VectorStores::FileContentResponse>]
-
#
-
# @see OpenAI::Models::VectorStores::FileContentParams
-
1
# Retrieve the parsed contents of a vector store file.
#
# @param file_id [String] The ID of the file within the vector store.
# @param params [Hash] Request parameters; must include :vector_store_id.
#
# @return [OpenAI::Internal::Page<OpenAI::Models::VectorStores::FileContentResponse>]
#
# @see OpenAI::Models::VectorStores::FileContentParams
def content(file_id, params)
  parsed, options = OpenAI::VectorStores::FileContentParams.dump_request(params)
  # The vector store ID belongs in the URL path rather than the body.
  vector_store_id =
    parsed.delete(:vector_store_id) do |key|
      raise ArgumentError, "missing required path argument #{key}"
    end
  request_options = {extra_headers: {"OpenAI-Beta" => "assistants=v2"}, **options}
  @client.request(
    method: :get,
    path: ["vector_stores/%1$s/files/%2$s/content", vector_store_id, file_id],
    page: OpenAI::Internal::Page,
    model: OpenAI::Models::VectorStores::FileContentResponse,
    options: request_options
  )
end
-
-
# @api private
-
#
-
# @param client [OpenAI::Client]
-
1
def initialize(client:)
  # Keep a reference to the top-level client; every request issued by this
  # resource (update/list/delete/content) goes through it.
  @client = client
end
-
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
require "openssl"
-
1
require "base64"
-
-
1
module OpenAI
-
1
module Resources
-
1
class Webhooks
-
# Validates that the given payload was sent by OpenAI and parses the payload.
-
#
-
# @param payload [String] The raw webhook payload as a string
-
# @param headers [Hash] The webhook headers
-
# @param webhook_secret [String, nil] The webhook secret (optional, will use client webhook secret or ENV["OPENAI_WEBHOOK_SECRET"] if not provided)
-
#
-
# @return [OpenAI::Models::Webhooks::BatchCancelledWebhookEvent, OpenAI::Models::Webhooks::BatchCompletedWebhookEvent, OpenAI::Models::Webhooks::BatchExpiredWebhookEvent, OpenAI::Models::Webhooks::BatchFailedWebhookEvent, OpenAI::Models::Webhooks::EvalRunCanceledWebhookEvent, OpenAI::Models::Webhooks::EvalRunFailedWebhookEvent, OpenAI::Models::Webhooks::EvalRunSucceededWebhookEvent, OpenAI::Models::Webhooks::FineTuningJobCancelledWebhookEvent, OpenAI::Models::Webhooks::FineTuningJobFailedWebhookEvent, OpenAI::Models::Webhooks::FineTuningJobSucceededWebhookEvent, OpenAI::Models::Webhooks::ResponseCancelledWebhookEvent, OpenAI::Models::Webhooks::ResponseCompletedWebhookEvent, OpenAI::Models::Webhooks::ResponseCreatedWebhookEvent, OpenAI::Models::Webhooks::ResponseFailedWebhookEvent, OpenAI::Models::Webhooks::ResponseIncompleteWebhookEvent]
-
#
-
# @raise [ArgumentError] if signature verification fails
-
1
# Validates that the given payload was sent by OpenAI and parses the payload.
#
# @param payload [String] The raw webhook payload as a string
# @param headers [Hash] The webhook headers
# @param webhook_secret [String, nil] The webhook secret (optional, will use client webhook secret or ENV["OPENAI_WEBHOOK_SECRET"] if not provided)
#
# @return [OpenAI::Models::Webhooks::BatchCancelledWebhookEvent, OpenAI::Models::Webhooks::BatchCompletedWebhookEvent, OpenAI::Models::Webhooks::BatchExpiredWebhookEvent, OpenAI::Models::Webhooks::BatchFailedWebhookEvent, OpenAI::Models::Webhooks::EvalRunCanceledWebhookEvent, OpenAI::Models::Webhooks::EvalRunFailedWebhookEvent, OpenAI::Models::Webhooks::EvalRunSucceededWebhookEvent, OpenAI::Models::Webhooks::FineTuningJobCancelledWebhookEvent, OpenAI::Models::Webhooks::FineTuningJobFailedWebhookEvent, OpenAI::Models::Webhooks::FineTuningJobSucceededWebhookEvent, OpenAI::Models::Webhooks::ResponseCancelledWebhookEvent, OpenAI::Models::Webhooks::ResponseCompletedWebhookEvent, OpenAI::Models::Webhooks::ResponseCreatedWebhookEvent, OpenAI::Models::Webhooks::ResponseFailedWebhookEvent, OpenAI::Models::Webhooks::ResponseIncompleteWebhookEvent]
#
# @raise [ArgumentError] if signature verification fails
def unwrap(
  payload,
  headers = {},
  webhook_secret = @client.webhook_secret || ENV["OPENAI_WEBHOOK_SECRET"]
)
  # Refuse to parse anything that is not provably from OpenAI.
  verify_signature(payload, headers, webhook_secret)

  event = JSON.parse(payload, symbolize_names: true)
  OpenAI::Internal::Type::Converter.coerce(OpenAI::Models::Webhooks::UnwrapWebhookEvent, event)
end
-
-
# Validates whether or not the webhook payload was sent by OpenAI.
-
#
-
# @param payload [String] The webhook payload as a string
-
# @param headers [Hash] The webhook headers
-
# @param webhook_secret [String, nil] The webhook secret (optional, will use client webhook secret or ENV["OPENAI_WEBHOOK_SECRET"] if not provided)
-
# @param tolerance [Integer] Maximum age of the webhook in seconds (default: 300 = 5 minutes)
-
#
-
# @raise [ArgumentError] if the signature is invalid
-
1
# Validates whether or not the webhook payload was sent by OpenAI.
#
# Verification performs three checks:
# 1. the required webhook-signature / webhook-timestamp / webhook-id headers
#    are present (string or symbol keys accepted);
# 2. the timestamp is a well-formed integer within +tolerance+ seconds of
#    the current time (replay-attack protection);
# 3. at least one signature in the header matches the HMAC-SHA256 of
#    "{webhook_id}.{timestamp}.{payload}" under the secret, compared
#    timing-safely.
#
# @param payload [String] The webhook payload as a string
# @param headers [Hash] The webhook headers
# @param webhook_secret [String, nil] The webhook secret (optional, will use client webhook secret or ENV["OPENAI_WEBHOOK_SECRET"] if not provided)
# @param tolerance [Integer] Maximum age of the webhook in seconds (default: 300 = 5 minutes)
#
# @raise [ArgumentError] if required inputs are missing or the timestamp is malformed
# @raise [OpenAI::Errors::InvalidWebhookSignatureError] if the signature is invalid
def verify_signature(
  payload,
  headers,
  webhook_secret = @client.webhook_secret || ENV["OPENAI_WEBHOOK_SECRET"],
  tolerance = 300
)
  if webhook_secret.nil?
    raise ArgumentError,
          "The webhook secret must either be set using the env var, OPENAI_WEBHOOK_SECRET, " \
          "or passed to this function"
  end

  # Extract required headers
  signature_header = headers["webhook-signature"] || headers[:webhook_signature]
  timestamp_header = headers["webhook-timestamp"] || headers[:webhook_timestamp]
  webhook_id = headers["webhook-id"] || headers[:webhook_id]

  if signature_header.nil?
    raise ArgumentError, "Missing required webhook-signature header"
  end

  if timestamp_header.nil?
    raise ArgumentError, "Missing required webhook-timestamp header"
  end

  if webhook_id.nil?
    raise ArgumentError, "Missing required webhook-id header"
  end

  # Validate timestamp to prevent replay attacks.
  # BUG FIX: the original wrapped `timestamp_header.to_i` in a
  # `rescue ArgumentError`, but String#to_i never raises — the intended
  # format check was dead code and e.g. "abc" silently became 0 (then
  # failed later with a misleading "too old" error). Kernel#Integer with
  # exception: false returns nil on malformed input so we can report it.
  timestamp_seconds = Integer(timestamp_header, 10, exception: false)
  if timestamp_seconds.nil?
    raise ArgumentError, "Invalid webhook timestamp format"
  end

  now = Time.now.to_i

  if now - timestamp_seconds > tolerance
    raise OpenAI::Errors::InvalidWebhookSignatureError, "Webhook timestamp is too old"
  end

  if timestamp_seconds > now + tolerance
    raise OpenAI::Errors::InvalidWebhookSignatureError, "Webhook timestamp is too new"
  end

  # Extract signatures from v1,<base64> format
  # The signature header can have multiple values, separated by spaces.
  # Each value is in the format v1,<base64>. We should accept if any match.
  signatures = signature_header.split.map do |part|
    if part.start_with?("v1,")
      part[3..]
    else
      part
    end
  end

  # Decode the secret if it starts with whsec_
  decoded_secret = if webhook_secret.start_with?("whsec_")
    Base64.decode64(webhook_secret[6..])
  else
    webhook_secret
  end

  # Create the signed payload: {webhook_id}.{timestamp}.{payload}
  signed_payload = "#{webhook_id}.#{timestamp_header}.#{payload}"

  # Compute HMAC-SHA256 signature (encode64 appends a trailing newline; strip it)
  expected_signature = Base64.encode64(
    OpenSSL::HMAC.digest("sha256", decoded_secret, signed_payload)
  ).strip

  # Accept if any signature matches using timing-safe comparison
  return if signatures.any? { |signature| OpenSSL.secure_compare(expected_signature, signature) }

  raise OpenAI::Errors::InvalidWebhookSignatureError,
        "The given webhook signature does not match the expected signature"
end
-
-
# @api private
-
#
-
# @param client [OpenAI::Client]
-
1
def initialize(client:)
  # Keep a reference to the top-level client; its webhook_secret is used as
  # the default secret for unwrap/verify_signature.
  @client = client
end
-
end
-
end
-
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  # Public alias: exposes the streaming helpers under the shorter
  # `OpenAI::Streaming` constant.
  Streaming = Helpers::Streaming
end
-
# frozen_string_literal: true
-
-
1
module OpenAI
  # Public aliases: re-export the structured-output helper types at the top
  # level so callers can write e.g. `OpenAI::BaseModel` instead of the full
  # `OpenAI::Helpers::StructuredOutput::BaseModel` path.
  StructuredOutput = OpenAI::Helpers::StructuredOutput
  ArrayOf = OpenAI::Helpers::StructuredOutput::ArrayOf
  BaseModel = OpenAI::Helpers::StructuredOutput::BaseModel
  Boolean = OpenAI::Helpers::StructuredOutput::Boolean
  EnumOf = OpenAI::Helpers::StructuredOutput::EnumOf
  UnionOf = OpenAI::Helpers::StructuredOutput::UnionOf
end